commit 520e3b3ff88656884888aa3f9e5f5a0686b64933 Author: Preetha Appan <460133+preetapan@users.noreply.github.com> Date: Mon Sep 30 10:26:20 2019 -0500 initial set of files for task driver skeleton diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f0e5c79 --- /dev/null +++ b/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..45ba444 --- /dev/null +++ b/Makefile @@ -0,0 +1,55 @@ +NAME := nomad-driver-skeleton +PKG := github.com/hashicorp/$(NAME) + +CGO_ENABLED := 0 + +# Set any default go build tags. +BUILDTAGS := +# Set an output prefix, which is the local directory if not specified +PREFIX?=$(shell pwd) + +GOOSARCHES=linux/amd64 + +GO=go + +.PHONY: build +build: $(NAME) ## Builds a dynamic executable or package. + +$(NAME): $(wildcard *.go) $(wildcard */*.go) + @echo "+ $@" + $(V)GO111MODULE=on GOOS=linux $(GO) build -tags "$(BUILDTAGS)" ${GO_LDFLAGS} -o $(NAME) ./cmd/driver/main.go + +.PHONY: static +static: ## Builds a static executable. + @echo "+ $@" + CGO_ENABLED=$(CGO_ENABLED) $(GO) build \ + -tags "$(BUILDTAGS) static_build" \ + ${GO_LDFLAGS_STATIC} -o $(NAME) . + +all: clean build fmt lint test vet + +.PHONY: fmt +fmt: ## Verifies all files have been `gofmt`ed. + @echo "+ $@" + @gofmt -s -l . | grep -v '.pb.go:' | grep -v vendor | tee /dev/stderr + +.PHONY: test +test: ## Runs the go tests. + @echo "+ $@" + @$(GO) test -v -tags "$(BUILDTAGS) cgo" $(shell $(GO) list ./... | grep -v vendor) + +.PHONY: vet +vet: ## Verifies `go vet` passes. 
+	@echo "+ $@"
+	@$(GO) vet -printfuncs Error,ErrorDepth,Errorf,Errorln,Exit,ExitDepth,Exitf,Exitln,Fatal,FatalDepth,Fatalf,Fatalln,Info,InfoDepth,Infof,Infoln,Warning,WarningDepth,Warningf,Warningln -all ./...
+
+.PHONY: clean
+clean: ## Clean up any build binaries or packages.
+	@echo "+ $@"
+	$(RM) $(NAME)
+	$(RM) -r $(BUILDDIR)
+
+dep:
+	$(V)GO111MODULE=on go mod download
+	$(V)GO111MODULE=on go mod vendor
+	$(V)GO111MODULE=on go mod tidy
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..0b236e2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,50 @@
+# nomad-driver-skeleton
+This project is a skeleton implementation of a [HashiCorp Nomad](https://www.nomadproject.io/) task driver plugin.
+You can clone and modify it to get started writing your own driver plugin.
+
+Note that this is only a skeleton to help you get started; the Makefile only builds
+for linux/amd64.
+
+Requirements
+------------
+
+- [Nomad](https://www.nomadproject.io/downloads.html) v0.9.5+
+- [Go](https://golang.org/doc/install) v1.11+ (to build the plugin)
+
+Get Started
+-----------
+
+Clone the repository to `$GOPATH/src/github.com/hashicorp/nomad-driver-skeleton`.
+
+Copy the files into your new driver's package and change package names as needed.
+
+```sh
+$ cd $GOPATH/src/github.com/hashicorp/nomad-driver-skeleton
+$ make dep
+$ make build
+```
+
+Building the Driver
+-------------------
+
+[Go](http://www.golang.org) should be installed on your machine.
+You will also need to correctly set up a [GOPATH](http://golang.org/doc/code.html#GOPATH)
+and add `$GOPATH/bin` to your `$PATH`.
+
+To compile the driver plugin, run `make build`. This builds the plugin binary, which you then
+place in the Nomad plugin directory, by default `<data_dir>/plugins/`.
+See the Nomad `-data-dir` and `-plugin-dir` flags for more information.
+
+```sh
+$ make build
+```
+
+The Makefile and project module structure were inspired by the Sylabs Singularity driver.
+
+Code Organization
+-----------------
+The skeleton driver comes with an in-memory task state store (see state.go).
+This in-memory map stores runtime details (such as a process ID). The details
+of what to store are left to the implementation of the runtime driver.
+The fields stored by the base implementation in this skeleton can be found in the `taskHandle` struct.
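
To make the Code Organization notes concrete, the sketch below shows one way a concrete driver could complete `StartTask` using the pieces in this commit: it starts a process, wraps it in a `taskHandle`, persists a `TaskState` into the returned handle so `RecoverTask` can rebuild it later, and registers the handle in the in-memory `taskStore`. This is only an illustrative sketch, not part of the skeleton: the `os/exec` call and the use of `strconfig` as a shell command are hypothetical placeholders for whatever your target runtime actually does, and the snippet assumes `fmt`, `os/exec`, and `time` are imported alongside the packages already used in driver.go.

```go
// One possible driver-specific completion of StartTask (illustrative only).
func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) {
	if _, ok := d.tasks.Get(cfg.ID); ok {
		return nil, nil, fmt.Errorf("task with ID %q already started", cfg.ID)
	}

	var driverConfig TaskConfig
	if err := cfg.DecodeDriverConfig(&driverConfig); err != nil {
		return nil, nil, fmt.Errorf("failed to decode driver config: %v", err)
	}

	handle := drivers.NewTaskHandle(taskHandleVersion)
	handle.Config = cfg

	// Hypothetical runtime step: launch a process based on the task's driver
	// config. A real driver would start a container, VM, remote job, etc.
	cmd := exec.Command("/bin/sh", "-c", driverConfig.StrConfig)
	if err := cmd.Start(); err != nil {
		return nil, nil, fmt.Errorf("failed to start task %q: %v", cfg.Name, err)
	}

	// Wrap the runtime details in a taskHandle (see handle.go).
	h := &taskHandle{
		pid:        cmd.Process.Pid,
		taskConfig: cfg,
		procState:  drivers.TaskStateRunning,
		startedAt:  time.Now().Round(time.Millisecond),
		logger:     d.logger,
	}

	// Persist enough state in the returned handle for RecoverTask to rebuild
	// the taskHandle after a Nomad client restart.
	driverState := TaskState{
		TaskConfig: cfg,
		StartedAt:  h.startedAt,
		PID:        h.pid,
	}
	if err := handle.SetDriverState(&driverState); err != nil {
		return nil, nil, fmt.Errorf("failed to set driver state: %v", err)
	}

	// Register the handle in the in-memory store (see state.go) so WaitTask,
	// StopTask, InspectTask, etc. can look it up by task ID, then monitor it.
	d.tasks.Set(cfg.ID, h)
	go h.run()

	return handle, nil, nil
}
```

On recovery, `RecoverTask` can decode the same `TaskState` with `handle.GetDriverState` (as driver.go already does) and reconstruct an equivalent `taskHandle` before re-inserting it into the store.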
diff --git a/cmd/driver/main.go b/cmd/driver/main.go new file mode 100644 index 0000000..433f9bc --- /dev/null +++ b/cmd/driver/main.go @@ -0,0 +1,21 @@ +// +build linux + +package main + +import ( + log "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/nomad/plugins" + "github.com/hashicorp/nomad-driver-skeleton/pkg/plugin" +) + +func main() { + // Serve the plugin + plugins.Serve(factory) +} + +// factory returns a new instance of a nomad driver plugin +func factory(log log.Logger) interface{} { + return skeleton.NewSkeletonDriver(log) +} + diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..30338d6 --- /dev/null +++ b/go.mod @@ -0,0 +1,49 @@ +module github.com/hashicorp/nomad-driver-skeleton + +require ( + github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5 // indirect + github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect + github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/go-ole/go-ole v1.2.2 // indirect + github.com/golang/snappy v0.0.1 // indirect + github.com/gorhill/cronexpr v0.0.0-20140423231348-a557574d6c02 // indirect + github.com/hashicorp/consul v1.0.7 // indirect + github.com/hashicorp/go-hclog v0.8.0 + github.com/hashicorp/go-plugin v0.0.0-20190212232519-b838ffee39ce // indirect + github.com/hashicorp/go-retryablehttp v0.5.3 // indirect + github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect + github.com/hashicorp/go-uuid v1.0.1 // indirect + github.com/hashicorp/go-version v1.0.0 // indirect + github.com/hashicorp/hcl v0.0.0-20161101180025-6e968a3fcdcb // indirect + github.com/hashicorp/memberlist v0.1.3 // indirect + github.com/hashicorp/nomad v0.9.0-rc2 + github.com/hashicorp/raft v0.0.0-20190104133720-9c733b2b7f53 // indirect + github.com/hashicorp/serf v0.0.0-20180119224300-b6017ae61f44 // indirect + github.com/hashicorp/vault v1.1.0 // indirect + github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/hpcloud/tail v0.0.0-20180514194441-a1dbeea552b7 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/mitchellh/copystructure v0.0.0-20170525013902-d23ffcb85de3 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/mitchellh/hashstructure v0.0.0-20160118175604-1ef5c71b025a // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/pascaldekloe/goe v0.1.0 // indirect + github.com/pierrec/lz4 v2.0.5+incompatible // indirect + github.com/pkg/errors v0.8.1 // indirect + github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 // indirect + github.com/shirou/gopsutil v0.0.0-20190131151121-071446942108 // indirect + github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect + github.com/ugorji/go v0.0.0-20170620060102-0053ebfd9d0e // indirect + github.com/vmihailenco/msgpack v4.0.2+incompatible // indirect + github.com/zclconf/go-cty v0.0.0-20180718220526-02bd58e97b57 // indirect + golang.org/x/sys v0.0.0-20190220154126-629670e5acc5 // indirect + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect + google.golang.org/appengine v1.4.0 // indirect + google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 // indirect + google.golang.org/grpc v1.18.0 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect +) diff 
--git a/go.sum b/go.sum new file mode 100644 index 0000000..42c6daa --- /dev/null +++ b/go.sum @@ -0,0 +1,165 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5 h1:U7q69tqXiCf6m097GRlNQB0/6SI1qWIOHYHhCEvDxF4= +github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5/go.mod h1:nxQPcNPR/34g+HcK2hEsF99O+GJgIkW/OmPl8wtzhmk= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-ole/go-ole v1.2.2 h1:QNWhweRd9D5Py2rRVboZ2L4SEoW/dyraWJCc8bgS8kE= +github.com/go-ole/go-ole v1.2.2/go.mod h1:pnvuG7BrDMZ8ifMurTQmxwhQM/odqm9sSqNe5BUI7v4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/gorhill/cronexpr v0.0.0-20140423231348-a557574d6c02 h1:Spo+4PFAGDqULAsZ7J69MOxq4/fwgZ0zvmDTBqpq7yU= +github.com/gorhill/cronexpr v0.0.0-20140423231348-a557574d6c02/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= +github.com/hashicorp/consul v1.0.7 h1:GuHjalgSkzFdlBVxfKzlLJJ6bT4VCvAXV678UWpuJbw= +github.com/hashicorp/consul v1.0.7/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0 
h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v0.0.0-20190212232519-b838ffee39ce h1:I3KJUf8jyMubLFeHit2ibr9YeVxJX2CXMXVM6xlB+Qk= +github.com/hashicorp/go-plugin v0.0.0-20190212232519-b838ffee39ce/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20161101180025-6e968a3fcdcb h1:S76lj//ZzHoMTiOOi3P9NcBQhoUxi4ZnYTcGqQnkCcE= +github.com/hashicorp/hcl v0.0.0-20161101180025-6e968a3fcdcb/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/nomad v0.9.0-rc2 h1:sN6Dy8ve/fwrWFSNzYSikYaXqqoUm1o4pqtJIOTaRQo= +github.com/hashicorp/nomad v0.9.0-rc2/go.mod h1:WRaKjdO1G2iqi86TvTjIYtKTyxg4pl7NLr9InxtWaI0= +github.com/hashicorp/raft v0.0.0-20190104133720-9c733b2b7f53 h1:Kcs5PApYrQyn+RLWUvk3eQhKNGDj8M/r1mt0X8p9HAA= +github.com/hashicorp/raft v0.0.0-20190104133720-9c733b2b7f53/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= +github.com/hashicorp/serf v0.0.0-20180119224300-b6017ae61f44 h1:uY64+3NuMxFyc9+nchedxCBabBhU67QF+ymrIedtexE= +github.com/hashicorp/serf v0.0.0-20180119224300-b6017ae61f44/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hashicorp/vault v1.1.0 h1:v79NUgO5xCZnXVzUkIqFOXtP8YhpnHAi1fk3eo9cuOE= +github.com/hashicorp/vault v1.1.0/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= 
+github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v0.0.0-20180514194441-a1dbeea552b7 h1:lus8hJKTrh146vNoUWNHv2F1jdtsud5ajNL0/YndJUw= +github.com/hpcloud/tail v0.0.0-20180514194441-a1dbeea552b7/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/copystructure v0.0.0-20170525013902-d23ffcb85de3 h1:dECZqiJYhKdj9QlLpiQaRDXHDXRTdiyZI3owdDGhlYY= +github.com/mitchellh/copystructure v0.0.0-20170525013902-d23ffcb85de3/go.mod h1:eOsF2yLPlBBJPvD+nhl5QMTBSOBbOph6N7j/IDUw7PY= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/hashstructure v0.0.0-20160118175604-1ef5c71b025a h1:2p2+qxgbcNvMPt/Q/Nfd0mEHJokUf7rHAYDVQH1dKpM= +github.com/mitchellh/hashstructure v0.0.0-20160118175604-1ef5c71b025a/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 
h1:7YvPJVmEeFHR1Tj9sZEYsmarJEQfMVYpd/Vyy/A8dqE= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil v0.0.0-20190131151121-071446942108 h1:XXgDK65TPH+Qbo2sdYHldM5avclwThBXVYZHxroFkTQ= +github.com/shirou/gopsutil v0.0.0-20190131151121-071446942108/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/ugorji/go v0.0.0-20170620060102-0053ebfd9d0e h1:Eurc/1QbldPTy6eU1WSHOH1vuFc7BAUdKDWSsZd4ieo= +github.com/ugorji/go v0.0.0-20170620060102-0053ebfd9d0e/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/vmihailenco/msgpack v4.0.2+incompatible h1:6ujmmycMfB62Mwv2N4atpnf8CKLSzhgodqMenpELKIQ= +github.com/vmihailenco/msgpack v4.0.2+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/zclconf/go-cty v0.0.0-20180718220526-02bd58e97b57 h1:FWjfWQtNK83IJMe58FfMCSNkySe0PO9TvrQyLOKcPxw= +github.com/zclconf/go-cty v0.0.0-20180718220526-02bd58e97b57/go.mod h1:LnDKxj8gN4aatfXUqmUNooaDjvmDcLPbAN3hYBIVoJE= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953 h1:LuZIitY8waaxUfNIdtajyE/YzA/zyf0YxXG27VpLrkg= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190220154126-629670e5acc5 h1:3Nsfe5Xa1wTt01QxlAFIY5j9ycDtS+d7mhvI8ZY5bn0= +golang.org/x/sys v0.0.0-20190220154126-629670e5acc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 h1:SdCO7As+ChE1iV3IjBleIIWlj8VjZWuYEUF5pjELOOQ= +google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= +google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/plugin/driver.go b/pkg/plugin/driver.go new file mode 100644 index 0000000..5a8e7db --- /dev/null +++ b/pkg/plugin/driver.go @@ -0,0 +1,330 @@ +package skeleton + +import ( + "context" + "fmt" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/drivers/shared/eventer" + "github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/drivers" + "github.com/hashicorp/nomad/plugins/shared/hclspec" + structs "github.com/hashicorp/nomad/plugins/shared/structs" +) + +const ( + // pluginName is the name of the plugin + pluginName = "test-plugin" + + // fingerprintPeriod is the interval at which the driver will send fingerprint responses + fingerprintPeriod = 30 * time.Second + + // taskHandleVersion is the version of task handle which this driver sets + // and understands how to decode driver state + taskHandleVersion = 1 +) + +var ( + // pluginInfo is the response returned for the PluginInfo RPC + pluginInfo = &base.PluginInfoResponse{ + Type: base.PluginTypeDriver, + 
PluginApiVersions: []string{"0.1.0"},
+		PluginVersion:     "0.0.1",
+		Name:              pluginName,
+	}
+
+	// configSpec is the hcl specification returned by the ConfigSchema RPC
+	configSpec = hclspec.NewObject(map[string]*hclspec.Spec{
+		"enabled": hclspec.NewDefault(
+			hclspec.NewAttr("enabled", "bool", false),
+			hclspec.NewLiteral("true"),
+		),
+	})
+
+	// taskConfigSpec is the hcl specification for the driver config section of
+	// a taskConfig within a job. It is returned in the TaskConfigSchema RPC
+	taskConfigSpec = hclspec.NewObject(map[string]*hclspec.Spec{
+		"strconfig": hclspec.NewAttr("strconfig", "string", true),
+		"boolcfg":   hclspec.NewAttr("boolcfg", "bool", false),
+	})
+
+	// capabilities is returned by the Capabilities RPC and indicates what
+	// optional features this driver supports. This should be set according
+	// to the target runtime
+	capabilities = &drivers.Capabilities{
+		SendSignals: true,
+		Exec:        false,
+	}
+)
+
+// Driver is a skeleton driver implementation. The fields in this struct
+// are to help plugin authors get started writing their runtime plugins.
+type Driver struct {
+	// eventer is used to handle multiplexing of TaskEvents calls such that an
+	// event can be broadcast to all callers
+	eventer *eventer.Eventer
+
+	// config is the driver configuration set by the SetConfig RPC
+	config *Config
+
+	// nomadConfig is the client config from nomad
+	nomadConfig *base.ClientDriverConfig
+
+	// tasks is the in-memory datastore mapping task IDs to their taskHandle
+	tasks *taskStore
+
+	// ctx is the context for the driver. It is passed to other subsystems to
+	// coordinate shutdown
+	ctx context.Context
+
+	// signalShutdown is called when the driver is shutting down and cancels the
+	// ctx passed to any subsystems
+	signalShutdown context.CancelFunc
+
+	// logger will log to the Nomad agent
+	logger hclog.Logger
+}
+
+// Config is the driver configuration set by the SetConfig RPC call
+type Config struct {
+	// Enabled is set to true to enable this driver
+	Enabled bool `codec:"enabled"`
+}
+
+// TaskConfig is the driver configuration of a task within a job
+type TaskConfig struct {
+	StrConfig string `codec:"strconfig"`
+
+	// BoolConfig's codec tag matches the "boolcfg" attribute declared in
+	// taskConfigSpec so that the value decodes correctly.
+	BoolConfig bool `codec:"boolcfg"`
+}
+
+// TaskState is the state which is encoded in the handle returned in
+// StartTask. This information is needed to rebuild the task state and handler
+// during recovery.
+type TaskState struct {
+	TaskConfig    *drivers.TaskConfig
+	ContainerName string
+	StartedAt     time.Time
+	PID           int
+}
+
+// NewSkeletonDriver returns a new DriverPlugin implementation
+func NewSkeletonDriver(logger hclog.Logger) drivers.DriverPlugin {
+	ctx, cancel := context.WithCancel(context.Background())
+	logger = logger.Named(pluginName)
+
+	return &Driver{
+		eventer:        eventer.NewEventer(ctx, logger),
+		config:         &Config{},
+		tasks:          newTaskStore(),
+		ctx:            ctx,
+		signalShutdown: cancel,
+		logger:         logger,
+	}
+}
+
+func (d *Driver) PluginInfo() (*base.PluginInfoResponse, error) {
+	return pluginInfo, nil
+}
+
+func (d *Driver) ConfigSchema() (*hclspec.Spec, error) {
+	return configSpec, nil
+}
+
+func (d *Driver) SetConfig(cfg *base.Config) error {
+	var config Config
+	if len(cfg.PluginConfig) != 0 {
+		if err := base.MsgPackDecode(cfg.PluginConfig, &config); err != nil {
+			return err
+		}
+	}
+
+	d.config = &config
+	if cfg.AgentConfig != nil {
+		d.nomadConfig = cfg.AgentConfig.Driver
+	}
+
+	return nil
+}
+
+func (d *Driver) Shutdown(ctx context.Context) error {
+	d.signalShutdown()
+	return nil
+}
+
+func (d *Driver) TaskConfigSchema() (*hclspec.Spec, error) {
+	return taskConfigSpec, nil
+}
+
+func (d *Driver) Capabilities() (*drivers.Capabilities, error) {
+	return capabilities, nil
+}
+
+func (d *Driver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) {
+	ch := make(chan *drivers.Fingerprint)
+	go d.handleFingerprint(ctx, ch)
+	return ch, nil
+}
+
+func (d *Driver) handleFingerprint(ctx context.Context, ch chan<- *drivers.Fingerprint) {
+	defer close(ch)
+	ticker := time.NewTimer(0)
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-d.ctx.Done():
+			return
+		case <-ticker.C:
+			ticker.Reset(fingerprintPeriod)
+			ch <- d.buildFingerprint()
+		}
+	}
+}
+
+func (d *Driver) buildFingerprint() *drivers.Fingerprint {
+	var health drivers.HealthState
+	var desc string
+	attrs := map[string]*structs.Attribute{}
+
+	// Implement fingerprinting logic here to populate health and driver attributes
+
+	return &drivers.Fingerprint{
+		Attributes:        attrs,
+		Health:            health,
+		HealthDescription: desc,
+	}
+}
+
+func (d *Driver) RecoverTask(handle *drivers.TaskHandle) error {
+	if handle == nil {
+		return fmt.Errorf("handle cannot be nil")
+	}
+
+	if _, ok := d.tasks.Get(handle.Config.ID); ok {
+		return nil
+	}
+
+	var taskState TaskState
+	if err := handle.GetDriverState(&taskState); err != nil {
+		return fmt.Errorf("failed to decode task state from handle: %v", err)
+	}
+
+	var driverConfig TaskConfig
+	if err := taskState.TaskConfig.DecodeDriverConfig(&driverConfig); err != nil {
+		return fmt.Errorf("failed to decode driver config: %v", err)
+	}
+
+	// Implement a driver-specific way to reattach to a running task, or restart it
+	return nil
+}
+
+// StartTask sets up the task environment and starts the task's executor
+func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) {
+	if _, ok := d.tasks.Get(cfg.ID); ok {
+		return nil, nil, fmt.Errorf("task with ID %q already started", cfg.ID)
+	}
+
+	var driverConfig TaskConfig
+	if err := cfg.DecodeDriverConfig(&driverConfig); err != nil {
+		return nil, nil, fmt.Errorf("failed to decode driver config: %v", err)
+	}
+
+	handle := drivers.NewTaskHandle(taskHandleVersion)
+	handle.Config = cfg
+
+	// Implement the driver-specific mechanism to start the task here
+
+	return nil, nil, nil
+}
+
+func (d *Driver) WaitTask(ctx context.Context, taskID string) (<-chan *drivers.ExitResult, error) {
+
handle, ok := d.tasks.Get(taskID) + if !ok { + return nil, drivers.ErrTaskNotFound + } + + ch := make(chan *drivers.ExitResult) + go d.handleWait(ctx, handle, ch) + + return ch, nil +} + +func (d *Driver) handleWait(ctx context.Context, handle *taskHandle, ch chan *drivers.ExitResult) { + defer close(ch) + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-d.ctx.Done(): + return + case <-ticker.C: + s := handle.TaskStatus() + if s.State == drivers.TaskStateExited { + ch <- handle.exitResult + } + } + } +} + +func (d *Driver) StopTask(taskID string, timeout time.Duration, signal string) error { + handle, ok := d.tasks.Get(taskID) + if !ok { + return drivers.ErrTaskNotFound + } + + if err := handle.shutdown(timeout); err != nil { + return fmt.Errorf("executor Shutdown failed: %v", err) + } + + return nil +} + +func (d *Driver) DestroyTask(taskID string, force bool) error { + handle, ok := d.tasks.Get(taskID) + if !ok { + return drivers.ErrTaskNotFound + } + + if handle.IsRunning() && !force { + return fmt.Errorf("cannot destroy running task") + } + + d.tasks.Delete(taskID) + return nil +} + +func (d *Driver) InspectTask(taskID string) (*drivers.TaskStatus, error) { + handle, ok := d.tasks.Get(taskID) + if !ok { + return nil, drivers.ErrTaskNotFound + } + + return handle.TaskStatus(), nil +} + +func (d *Driver) TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *drivers.TaskResourceUsage, error) { + handle, ok := d.tasks.Get(taskID) + if !ok { + return nil, drivers.ErrTaskNotFound + } + + return handle.stats(ctx, interval) +} + +func (d *Driver) TaskEvents(ctx context.Context) (<-chan *drivers.TaskEvent, error) { + return d.eventer.TaskEvents(ctx) +} + +func (d *Driver) SignalTask(taskID string, signal string) error { + return fmt.Errorf("This driver does not support signals") +} + +func (d *Driver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*drivers.ExecTaskResult, error) { + return nil, fmt.Errorf("This driver does not support exec") //TODO +} diff --git a/pkg/plugin/handle.go b/pkg/plugin/handle.go new file mode 100644 index 0000000..ddeab4a --- /dev/null +++ b/pkg/plugin/handle.go @@ -0,0 +1,79 @@ +package skeleton + +import ( + "context" + "strconv" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/plugins/drivers" +) + +// taskHandle should store all relevant runtime information +// such as process ID if this is a local task or other meta +// data if this driver deals with external APIs +type taskHandle struct { + pid int + logger hclog.Logger + + // stateLock syncs access to all fields below + stateLock sync.RWMutex + + taskConfig *drivers.TaskConfig + procState drivers.TaskState + startedAt time.Time + completedAt time.Time + exitResult *drivers.ExitResult +} + +func (h *taskHandle) TaskStatus() *drivers.TaskStatus { + h.stateLock.RLock() + defer h.stateLock.RUnlock() + + return &drivers.TaskStatus{ + ID: h.taskConfig.ID, + Name: h.taskConfig.Name, + State: h.procState, + StartedAt: h.startedAt, + CompletedAt: h.completedAt, + ExitResult: h.exitResult, + DriverAttributes: map[string]string{ + "pid": strconv.Itoa(h.pid), + }, + } +} + +func (h *taskHandle) IsRunning() bool { + h.stateLock.RLock() + defer h.stateLock.RUnlock() + return h.procState == drivers.TaskStateRunning +} + +func (h *taskHandle) run() { + h.stateLock.Lock() + if h.exitResult == nil { + h.exitResult = &drivers.ExitResult{} + } + h.stateLock.Unlock() + + 
h.stateLock.Lock() + defer h.stateLock.Unlock() + + // Implement running the task here, this is driver specific +} + +func (h *taskHandle) stats(ctx context.Context, interval time.Duration) (<-chan *drivers.TaskResourceUsage, error) { + ch := make(chan *drivers.TaskResourceUsage) + + return ch, nil +} + +// shutdown shuts down the container, with `timeout` grace period +// before killing the container with SIGKILL. +func (h *taskHandle) shutdown(timeout time.Duration) error { + // Implement shutdown of the task. Shutdown should be tried + // gracefully first. If its past the timeout, then this method + // should do a hard shutdown and return + return nil +} diff --git a/pkg/plugin/state.go b/pkg/plugin/state.go new file mode 100644 index 0000000..266780a --- /dev/null +++ b/pkg/plugin/state.go @@ -0,0 +1,36 @@ +package skeleton + +import ( + "sync" +) + +// taskStore provides a mechanism to store and retrieve +// task handles given a string identifier. The ID should +// be unique per task +type taskStore struct { + store map[string]*taskHandle + lock sync.RWMutex +} + +func newTaskStore() *taskStore { + return &taskStore{store: map[string]*taskHandle{}} +} + +func (ts *taskStore) Set(id string, handle *taskHandle) { + ts.lock.Lock() + defer ts.lock.Unlock() + ts.store[id] = handle +} + +func (ts *taskStore) Get(id string) (*taskHandle, bool) { + ts.lock.RLock() + defer ts.lock.RUnlock() + t, ok := ts.store[id] + return t, ok +} + +func (ts *taskStore) Delete(id string) { + ts.lock.Lock() + defer ts.lock.Unlock() + delete(ts.store, id) +} diff --git a/vendor/github.com/LK4D4/joincontext/.travis.yml b/vendor/github.com/LK4D4/joincontext/.travis.yml new file mode 100644 index 0000000..e087144 --- /dev/null +++ b/vendor/github.com/LK4D4/joincontext/.travis.yml @@ -0,0 +1,13 @@ +language: go +sudo: false +go: + - 1.9 + - master +install: + - go get -t ./... + - go get github.com/golang/lint/golint +script: + - go vet ./... + - golint -set_exit_status ./... + - test -z "$(gofmt -s -l . | tee /dev/stderr)" + - go test -race -v ./... diff --git a/vendor/github.com/LK4D4/joincontext/LICENSE b/vendor/github.com/LK4D4/joincontext/LICENSE new file mode 100644 index 0000000..890c0c7 --- /dev/null +++ b/vendor/github.com/LK4D4/joincontext/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Alexander Morozov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/LK4D4/joincontext/README.md b/vendor/github.com/LK4D4/joincontext/README.md new file mode 100644 index 0000000..65417ec --- /dev/null +++ b/vendor/github.com/LK4D4/joincontext/README.md @@ -0,0 +1,39 @@ +# joincontext + +[![Build Status](https://travis-ci.org/LK4D4/joincontext.svg?branch=master)](https://travis-ci.org/LK4D4/joincontext) +[![GoDoc](https://godoc.org/github.com/LK4D4/joincontext?status.svg)](https://godoc.org/github.com/LK4D4/joincontext) + +Package joincontext provides a way to combine two contexts. +For example it might be useful for grpc server to cancel all handlers in +addition to provided handler context. + +For additional info see [godoc page](https://godoc.org/github.com/LK4D4/joincontext) + +## Example +```go + ctx1, cancel1 := context.WithCancel(context.Background()) + defer cancel1() + ctx2 := context.Background() + + ctx, cancel := joincontext.Join(ctx1, ctx2) + defer cancel() + select { + case <-ctx.Done(): + default: + fmt.Println("context alive") + } + + cancel1() + + // give some time to propagate + time.Sleep(100 * time.Millisecond) + + select { + case <-ctx.Done(): + fmt.Println(ctx.Err()) + default: + } + + // Output: context alive + // context canceled +``` diff --git a/vendor/github.com/LK4D4/joincontext/context.go b/vendor/github.com/LK4D4/joincontext/context.go new file mode 100644 index 0000000..9d5783b --- /dev/null +++ b/vendor/github.com/LK4D4/joincontext/context.go @@ -0,0 +1,102 @@ +// Package joincontext provides a way to combine two contexts. +// For example it might be useful for grpc server to cancel all handlers in +// addition to provided handler context. +package joincontext + +import ( + "sync" + "time" + + "golang.org/x/net/context" +) + +type joinContext struct { + mu sync.Mutex + ctx1 context.Context + ctx2 context.Context + done chan struct{} + err error +} + +// Join returns new context which is child for two passed contexts. +// It starts new goroutine which tracks both contexts. +// +// Done() channel is closed when one of parents contexts is done. +// +// Deadline() returns earliest deadline between parent contexts. +// +// Err() returns error from first done parent context. +// +// Value(key) looks for key in parent contexts. First found is returned. 
+func Join(ctx1, ctx2 context.Context) (context.Context, context.CancelFunc) { + c := &joinContext{ctx1: ctx1, ctx2: ctx2, done: make(chan struct{})} + go c.run() + return c, c.cancel +} + +func (c *joinContext) Deadline() (deadline time.Time, ok bool) { + d1, ok1 := c.ctx1.Deadline() + if !ok1 { + return c.ctx2.Deadline() + } + d2, ok2 := c.ctx2.Deadline() + if !ok2 { + return d1, true + } + + if d2.Before(d1) { + return d2, true + } + return d1, true +} + +func (c *joinContext) Done() <-chan struct{} { + return c.done +} + +func (c *joinContext) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *joinContext) Value(key interface{}) interface{} { + v := c.ctx1.Value(key) + if v == nil { + v = c.ctx2.Value(key) + } + return v +} + +func (c *joinContext) run() { + var doneCtx context.Context + select { + case <-c.ctx1.Done(): + doneCtx = c.ctx1 + case <-c.ctx2.Done(): + doneCtx = c.ctx2 + case <-c.done: + return + } + + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = doneCtx.Err() + c.mu.Unlock() + close(c.done) +} + +func (c *joinContext) cancel() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = context.Canceled + + c.mu.Unlock() + close(c.done) +} diff --git a/vendor/github.com/StackExchange/wmi/LICENSE b/vendor/github.com/StackExchange/wmi/LICENSE new file mode 100644 index 0000000..ae80b67 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Stack Exchange + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/StackExchange/wmi/README.md b/vendor/github.com/StackExchange/wmi/README.md new file mode 100644 index 0000000..426d1a4 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/README.md @@ -0,0 +1,6 @@ +wmi +=== + +Package wmi provides a WQL interface to Windows WMI. + +Note: It interfaces with WMI on the local machine, therefore it only runs on Windows. diff --git a/vendor/github.com/StackExchange/wmi/swbemservices.go b/vendor/github.com/StackExchange/wmi/swbemservices.go new file mode 100644 index 0000000..9765a53 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/swbemservices.go @@ -0,0 +1,260 @@ +// +build windows + +package wmi + +import ( + "fmt" + "reflect" + "runtime" + "sync" + + "github.com/go-ole/go-ole" + "github.com/go-ole/go-ole/oleutil" +) + +// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx +type SWbemServices struct { + //TODO: track namespace. 
Not sure if we can re connect to a different namespace using the same instance + cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method + sWbemLocatorIUnknown *ole.IUnknown + sWbemLocatorIDispatch *ole.IDispatch + queries chan *queryRequest + closeError chan error + lQueryorClose sync.Mutex +} + +type queryRequest struct { + query string + dst interface{} + args []interface{} + finished chan error +} + +// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI +func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) { + //fmt.Println("InitializeSWbemServices: Starting") + //TODO: implement connectServerArgs as optional argument for init with connectServer call + s := new(SWbemServices) + s.cWMIClient = c + s.queries = make(chan *queryRequest) + initError := make(chan error) + go s.process(initError) + + err, ok := <-initError + if ok { + return nil, err //Send error to caller + } + //fmt.Println("InitializeSWbemServices: Finished") + return s, nil +} + +// Close will clear and release all of the SWbemServices resources +func (s *SWbemServices) Close() error { + s.lQueryorClose.Lock() + if s == nil || s.sWbemLocatorIDispatch == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices is not Initialized") + } + if s.queries == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices has been closed") + } + //fmt.Println("Close: sending close request") + var result error + ce := make(chan error) + s.closeError = ce //Race condition if multiple callers to close. May need to lock here + close(s.queries) //Tell background to shut things down + s.lQueryorClose.Unlock() + err, ok := <-ce + if ok { + result = err + } + //fmt.Println("Close: finished") + return result +} + +func (s *SWbemServices) process(initError chan error) { + //fmt.Println("process: starting background thread initialization") + //All OLE/WMI calls must happen on the same initialized thead, so lock this goroutine + runtime.LockOSThread() + defer runtime.LockOSThread() + + err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err != nil { + oleCode := err.(*ole.OleError).Code() + if oleCode != ole.S_OK && oleCode != S_FALSE { + initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err) + return + } + } + defer ole.CoUninitialize() + + unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") + if err != nil { + initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err) + return + } else if unknown == nil { + initError <- ErrNilCreateObject + return + } + defer unknown.Release() + s.sWbemLocatorIUnknown = unknown + + dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch) + if err != nil { + initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err) + return + } + defer dispatch.Release() + s.sWbemLocatorIDispatch = dispatch + + // we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs + //fmt.Println("process: initialized. 
closing initError") + close(initError) + //fmt.Println("process: waiting for queries") + for q := range s.queries { + //fmt.Printf("process: new query: len(query)=%d\n", len(q.query)) + errQuery := s.queryBackground(q) + //fmt.Println("process: s.queryBackground finished") + if errQuery != nil { + q.finished <- errQuery + } + close(q.finished) + } + //fmt.Println("process: queries channel closed") + s.queries = nil //set channel to nil so we know it is closed + //TODO: I think the Release/Clear calls can panic if things are in a bad state. + //TODO: May need to recover from panics and send error to method caller instead. + close(s.closeError) +} + +// Query runs the WQL query using a SWbemServices instance and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. +func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + s.lQueryorClose.Lock() + if s == nil || s.sWbemLocatorIDispatch == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices is not Initialized") + } + if s.queries == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices has been closed") + } + + //fmt.Println("Query: Sending query request") + qr := queryRequest{ + query: query, + dst: dst, + args: connectServerArgs, + finished: make(chan error), + } + s.queries <- &qr + s.lQueryorClose.Unlock() + err, ok := <-qr.finished + if ok { + //fmt.Println("Query: Finished with error") + return err //Send error to caller + } + //fmt.Println("Query: Finished") + return nil +} + +func (s *SWbemServices) queryBackground(q *queryRequest) error { + if s == nil || s.sWbemLocatorIDispatch == nil { + return fmt.Errorf("SWbemServices is not Initialized") + } + wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart + //fmt.Println("queryBackground: Starting") + + dv := reflect.ValueOf(q.dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType := checkMultiArg(dv) + if mat == multiArgTypeInvalid { + return ErrInvalidEntityType + } + + // service is a SWbemServices + serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...) 
+ if err != nil { + return err + } + service := serviceRaw.ToIDispatch() + defer serviceRaw.Clear() + + // result is a SWBemObjectSet + resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query) + if err != nil { + return err + } + result := resultRaw.ToIDispatch() + defer resultRaw.Clear() + + count, err := oleInt64(result, "Count") + if err != nil { + return err + } + + enumProperty, err := result.GetProperty("_NewEnum") + if err != nil { + return err + } + defer enumProperty.Clear() + + enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + if enum == nil { + return fmt.Errorf("can't get IEnumVARIANT, enum is nil") + } + defer enum.Release() + + // Initialize a slice with Count capacity + dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) + + var errFieldMismatch error + for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { + if err != nil { + return err + } + + err := func() error { + // item is a SWbemObject, but really a Win32_Process + item := itemRaw.ToIDispatch() + defer item.Release() + + ev := reflect.New(elemType) + if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + return nil + }() + if err != nil { + return err + } + } + //fmt.Println("queryBackground: Finished") + return errFieldMismatch +} diff --git a/vendor/github.com/StackExchange/wmi/wmi.go b/vendor/github.com/StackExchange/wmi/wmi.go new file mode 100644 index 0000000..a951b12 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/wmi.go @@ -0,0 +1,486 @@ +// +build windows + +/* +Package wmi provides a WQL interface for WMI on Windows. + +Example code to print names of running processes: + + type Win32_Process struct { + Name string + } + + func main() { + var dst []Win32_Process + q := wmi.CreateQuery(&dst, "") + err := wmi.Query(q, &dst) + if err != nil { + log.Fatal(err) + } + for i, v := range dst { + println(i, v.Name) + } + } + +*/ +package wmi + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-ole/go-ole" + "github.com/go-ole/go-ole/oleutil" +) + +var l = log.New(os.Stdout, "", log.LstdFlags) + +var ( + ErrInvalidEntityType = errors.New("wmi: invalid entity type") + // ErrNilCreateObject is the error returned if CreateObject returns nil even + // if the error was nil. + ErrNilCreateObject = errors.New("wmi: create object returned nil") + lock sync.Mutex +) + +// S_FALSE is returned by CoInitializeEx if it was already called on this thread. +const S_FALSE = 0x00000001 + +// QueryNamespace invokes Query with the given namespace on the local machine. +func QueryNamespace(query string, dst interface{}, namespace string) error { + return Query(query, dst, nil, namespace) +} + +// Query runs the WQL query and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. 
+// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. +// +// Query is a wrapper around DefaultClient.Query. +func Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + if DefaultClient.SWbemServicesClient == nil { + return DefaultClient.Query(query, dst, connectServerArgs...) + } + return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...) +} + +// A Client is an WMI query client. +// +// Its zero value (DefaultClient) is a usable client. +type Client struct { + // NonePtrZero specifies if nil values for fields which aren't pointers + // should be returned as the field types zero value. + // + // Setting this to true allows stucts without pointer fields to be used + // without the risk failure should a nil value returned from WMI. + NonePtrZero bool + + // PtrNil specifies if nil values for pointer fields should be returned + // as nil. + // + // Setting this to true will set pointer fields to nil where WMI + // returned nil, otherwise the types zero value will be returned. + PtrNil bool + + // AllowMissingFields specifies that struct fields not present in the + // query result should not result in an error. + // + // Setting this to true allows custom queries to be used with full + // struct definitions instead of having to define multiple structs. + AllowMissingFields bool + + // SWbemServiceClient is an optional SWbemServices object that can be + // initialized and then reused across multiple queries. If it is null + // then the method will initialize a new temporary client each time. + SWbemServicesClient *SWbemServices +} + +// DefaultClient is the default Client and is used by Query, QueryNamespace +var DefaultClient = &Client{} + +// Query runs the WQL query and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. +func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + dv := reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType := checkMultiArg(dv) + if mat == multiArgTypeInvalid { + return ErrInvalidEntityType + } + + lock.Lock() + defer lock.Unlock() + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err != nil { + oleCode := err.(*ole.OleError).Code() + if oleCode != ole.S_OK && oleCode != S_FALSE { + return err + } + } + defer ole.CoUninitialize() + + unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") + if err != nil { + return err + } else if unknown == nil { + return ErrNilCreateObject + } + defer unknown.Release() + + wmi, err := unknown.QueryInterface(ole.IID_IDispatch) + if err != nil { + return err + } + defer wmi.Release() + + // service is a SWbemServices + serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...) 
+ if err != nil { + return err + } + service := serviceRaw.ToIDispatch() + defer serviceRaw.Clear() + + // result is a SWBemObjectSet + resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query) + if err != nil { + return err + } + result := resultRaw.ToIDispatch() + defer resultRaw.Clear() + + count, err := oleInt64(result, "Count") + if err != nil { + return err + } + + enumProperty, err := result.GetProperty("_NewEnum") + if err != nil { + return err + } + defer enumProperty.Clear() + + enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + if enum == nil { + return fmt.Errorf("can't get IEnumVARIANT, enum is nil") + } + defer enum.Release() + + // Initialize a slice with Count capacity + dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) + + var errFieldMismatch error + for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { + if err != nil { + return err + } + + err := func() error { + // item is a SWbemObject, but really a Win32_Process + item := itemRaw.ToIDispatch() + defer item.Release() + + ev := reflect.New(elemType) + if err = c.loadEntity(ev.Interface(), item); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + return nil + }() + if err != nil { + return err + } + } + return errFieldMismatch +} + +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. +// StructType is the type of the struct pointed to by the destination argument. +type ErrFieldMismatch struct { + StructType reflect.Type + FieldName string + Reason string +} + +func (e *ErrFieldMismatch) Error() string { + return fmt.Sprintf("wmi: cannot load field %q into a %q: %s", + e.FieldName, e.StructType, e.Reason) +} + +var timeType = reflect.TypeOf(time.Time{}) + +// loadEntity loads a SWbemObject into a struct pointer. 
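For a caller's-eye view of the mapping this function performs, here is a rough sketch (Windows-only, like the package itself); Win32_OperatingSystem and its property names are standard WMI schema, used here purely to illustrate the supported destination field kinds:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/StackExchange/wmi"
)

// Each exported field is loaded from the WMI property of the same name.
type Win32_OperatingSystem struct {
	Caption            string
	FreePhysicalMemory uint64
	LastBootUpTime     time.Time // CIM datetime string parsed into time.Time
	Primary            *bool     // pointers to supported types are also accepted
}

func main() {
	var dst []Win32_OperatingSystem
	q := wmi.CreateQuery(&dst, "")
	if err := wmi.Query(q, &dst); err != nil {
		log.Fatal(err)
	}
	for _, info := range dst {
		fmt.Println(info.Caption, info.FreePhysicalMemory, info.LastBootUpTime)
	}
}
```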
+func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) { + v := reflect.ValueOf(dst).Elem() + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + of := f + isPtr := f.Kind() == reflect.Ptr + if isPtr { + ptr := reflect.New(f.Type().Elem()) + f.Set(ptr) + f = f.Elem() + } + n := v.Type().Field(i).Name + if !f.CanSet() { + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "CanSet() is false", + } + } + prop, err := oleutil.GetProperty(src, n) + if err != nil { + if !c.AllowMissingFields { + errFieldMismatch = &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "no such struct field", + } + } + continue + } + defer prop.Clear() + + switch val := prop.Value().(type) { + case int8, int16, int32, int64, int: + v := reflect.ValueOf(val).Int() + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(v) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + f.SetUint(uint64(v)) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not an integer class", + } + } + case uint8, uint16, uint32, uint64: + v := reflect.ValueOf(val).Uint() + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(int64(v)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + f.SetUint(v) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not an integer class", + } + } + case string: + switch f.Kind() { + case reflect.String: + f.SetString(val) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + iv, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return err + } + f.SetInt(iv) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + uv, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return err + } + f.SetUint(uv) + case reflect.Struct: + switch f.Type() { + case timeType: + if len(val) == 25 { + mins, err := strconv.Atoi(val[22:]) + if err != nil { + return err + } + val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60) + } + t, err := time.Parse("20060102150405.000000-0700", val) + if err != nil { + return err + } + f.Set(reflect.ValueOf(t)) + } + } + case bool: + switch f.Kind() { + case reflect.Bool: + f.SetBool(val) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a bool", + } + } + case float32: + switch f.Kind() { + case reflect.Float32: + f.SetFloat(float64(val)) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a Float32", + } + } + default: + if f.Kind() == reflect.Slice { + switch f.Type().Elem().Kind() { + case reflect.String: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetString(v.(string)) + } + f.Set(fArr) + } + case reflect.Uint8: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetUint(reflect.ValueOf(v).Uint()) + } + f.Set(fArr) + } + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: fmt.Sprintf("unsupported slice type (%T)", val), + } + } + } else { + typeof := 
reflect.TypeOf(val) + if typeof == nil && (isPtr || c.NonePtrZero) { + if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) { + of.Set(reflect.Zero(of.Type())) + } + break + } + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: fmt.Sprintf("unsupported type (%T)", val), + } + } + } + } + return errFieldMismatch +} + +type multiArgType int + +const ( + multiArgTypeInvalid multiArgType = iota + multiArgTypeStruct + multiArgTypeStructPtr +) + +// checkMultiArg checks that v has type []S, []*S for some struct type S. +// +// It returns what category the slice's elements are, and the reflect.Type +// that represents S. +func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { + if v.Kind() != reflect.Slice { + return multiArgTypeInvalid, nil + } + elemType = v.Type().Elem() + switch elemType.Kind() { + case reflect.Struct: + return multiArgTypeStruct, elemType + case reflect.Ptr: + elemType = elemType.Elem() + if elemType.Kind() == reflect.Struct { + return multiArgTypeStructPtr, elemType + } + } + return multiArgTypeInvalid, nil +} + +func oleInt64(item *ole.IDispatch, prop string) (int64, error) { + v, err := oleutil.GetProperty(item, prop) + if err != nil { + return 0, err + } + defer v.Clear() + + i := int64(v.Val) + return i, nil +} + +// CreateQuery returns a WQL query string that queries all columns of src. where +// is an optional string that is appended to the query, to be used with WHERE +// clauses. In such a case, the "WHERE" string should appear at the beginning. +func CreateQuery(src interface{}, where string) string { + var b bytes.Buffer + b.WriteString("SELECT ") + s := reflect.Indirect(reflect.ValueOf(src)) + t := s.Type() + if s.Kind() == reflect.Slice { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return "" + } + var fields []string + for i := 0; i < t.NumField(); i++ { + fields = append(fields, t.Field(i).Name) + } + b.WriteString(strings.Join(fields, ", ")) + b.WriteString(" FROM ") + b.WriteString(t.Name()) + b.WriteString(" " + where) + return b.String() +} diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore new file mode 100644 index 0000000..8c03ec1 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +/metrics.out diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 0000000..106569e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md new file mode 100644 index 0000000..aa73348 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/README.md @@ -0,0 +1,91 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. + +Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) + +Sinks +----- + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) +* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) +* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) +* InmemSink : Provides in-memory aggregation, can be used to export stats +* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. +* BlackholeSink : Sinks to nowhere + +In addition to the sinks, the `InmemSignal` can be used to catch a signal, +and dump a formatted output of recent metrics. For example, when a process gets +a SIGUSR1, it can dump to stderr recent performance metrics for debugging. + +Labels +------ + +Most metrics do have an equivalent ending with `WithLabels`, such methods +allow to push metrics with labels and use some features of underlying Sinks +(ex: translated into Prometheus labels). + +Since some of these labels may increase greatly cardinality of metrics, the +library allow to filter labels using a blacklist/whitelist filtering system +which is global to all metrics. + +* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to underlying Sink, otherwise, all labels are sent by default. +* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to underlying Sinks. + +By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that +no tags are filetered at all, but it allow to a user to globally block some tags with high +cardinality at application level. 
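A brief sketch of what that label configuration looks like in code; the label names below are illustrative only:

```go
cfg := metrics.DefaultConfig("service-name")
cfg.AllowedLabels = []string{"method", "status"} // whitelist: only these labels are kept
cfg.BlockedLabels = []string{"peer_addr"}        // blacklist: always dropped

sink, _ := metrics.NewStatsiteSink("statsite:8125")
m, _ := metrics.NewGlobal(cfg, sink)

// The same rules can be replaced at runtime (nil here clears the prefix filters).
m.UpdateFilterAndLabels(nil, nil, []string{"method"}, []string{"peer_addr"})
```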
+ +Examples +-------- + +Here is an example of using the package: + +```go +func SlowMethod() { + // Profiling the runtime of a method + defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) +} + +// Configure a statsite sink as the global metrics sink +sink, _ := metrics.NewStatsiteSink("statsite:8125") +metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) + +// Emit a Key/Value pair +metrics.EmitKey([]string{"questions", "meaning of life"}, 42) +``` + +Here is an example of setting up a signal handler: + +```go +// Setup the inmem sink and signal handler +inm := metrics.NewInmemSink(10*time.Second, time.Minute) +sig := metrics.DefaultInmemSignal(inm) +metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) + +// Run some code +inm.SetGauge([]string{"foo"}, 42) +inm.EmitKey([]string{"bar"}, 30) + +inm.IncrCounter([]string{"baz"}, 42) +inm.IncrCounter([]string{"baz"}, 1) +inm.IncrCounter([]string{"baz"}, 80) + +inm.AddSample([]string{"method", "wow"}, 42) +inm.AddSample([]string{"method", "wow"}, 100) +inm.AddSample([]string{"method", "wow"}, 22) + +.... +``` + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 0000000..31098dd --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 0000000..38136af --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 0000000..4e2d6a7 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,348 @@ +package metrics + +import ( + "bytes" + "fmt" + "math" + "net/url" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics interval we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. 
+ maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex + + rateDenom float64 +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]GaugeValue + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]SampledValue + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]SampledValue +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]GaugeValue), + Points: make(map[string][]float32), + Counters: make(map[string]SampledValue), + Samples: make(map[string]SampledValue), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Rate float64 // The values rate per time unit (usually 1 second) + Sum float64 // The sum of values + SumSq float64 `json:"-"` // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time `json:"-"` // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64, rateDenom float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.Rate = float64(a.Sum) / rateDenom + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSinkFromURL creates an InmemSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { + params := u.Query() + + interval, err := time.ParseDuration(params.Get("interval")) + if err != nil { + return nil, fmt.Errorf("Bad 'interval' param: %s", err) + } + + retain, err := time.ParseDuration(params.Get("retain")) + if err != nil { + return nil, fmt.Errorf("Bad 'retain' param: %s", err) + } + + return NewInmemSink(interval, retain), nil +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. 
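As a rough illustration of how the two arguments relate (retain/interval retained buckets), a short sketch; the metric key is made up:

```go
// 10s aggregation buckets, kept for one minute => six retained intervals.
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
inm.IncrCounter([]string{"requests"}, 1)

// Data() copies out the retained intervals; the last entry is the interval
// currently being aggregated.
for _, intv := range inm.Data() {
	fmt.Println(intv.Interval, len(intv.Counters))
}
```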
+func NewInmemSink(interval, retain time.Duration) *InmemSink { + rateTimeUnit := time.Second + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + i.SetGaugeWithLabels(key, val, nil) +} + +func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + i.IncrCounterWithLabels(key, val, nil) +} + +func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Counters[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Counters[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + i.AddSampleWithLabels(key, val, nil) +} + +func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Samples[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Samples[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + intervals := make([]*IntervalMetrics, n) + + copy(intervals[:n-1], i.intervals[:n-1]) + current := i.intervals[n-1] + + // make its own copy for current interval + intervals[n-1] = &IntervalMetrics{} + copyCurrent := intervals[n-1] + current.RLock() + *copyCurrent = *current + + copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) + for k, v := range current.Gauges { + copyCurrent.Gauges[k] = v + } + // saved values will be not change, just copy its link + copyCurrent.Points = make(map[string][]float32, len(current.Points)) + for k, v := range current.Points { + copyCurrent.Points[k] = v + } + copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) + for k, v := range current.Counters { + copyCurrent.Counters[k] = v + } + copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) + for k, v := range current.Samples { + copyCurrent.Samples[k] = v + } + current.RUnlock() + + return intervals +} + +func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + return nil +} + +func (i *InmemSink) createInterval(intv 
time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + key := buf.String() + + for _, label := range labels { + replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 0000000..504f1b3 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,118 @@ +package metrics + +import ( + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
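The method returns a summary value and leaves encoding to the caller; one plausible way to expose it over HTTP, sketched with the standard net/http and encoding/json packages (the wrapper and route are assumptions, not part of this package):

```go
func metricsHandler(inm *metrics.InmemSink) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		summary, err := inm.DisplayMetrics(w, r)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(summary)
	}
}

// http.Handle("/debug/metrics", metricsHandler(inm))
```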
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + data := i.Data() + + var interval *IntervalMetrics + n := len(data) + switch { + case n == 0: + return nil, fmt.Errorf("no metric intervals have been initialized yet") + case n == 1: + // Show the current interval if it's all we have + interval = i.intervals[0] + default: + // Show the most recent finished interval if we have one + interval = i.intervals[n-2] + } + + summary := MetricsSummary{ + Timestamp: interval.Interval.Round(time.Second).UTC().String(), + Gauges: make([]GaugeValue, 0, len(interval.Gauges)), + Points: make([]PointValue, 0, len(interval.Points)), + } + + // Format and sort the output of each metric type, so it gets displayed in a + // deterministic order. + for name, points := range interval.Points { + summary.Points = append(summary.Points, PointValue{name, points}) + } + sort.Slice(summary.Points, func(i, j int) bool { + return summary.Points[i].Name < summary.Points[j].Name + }) + + for hash, value := range interval.Gauges { + value.Hash = hash + value.DisplayLabels = make(map[string]string) + for _, label := range value.Labels { + value.DisplayLabels[label.Name] = label.Value + } + value.Labels = nil + + summary.Gauges = append(summary.Gauges, value) + } + sort.Slice(summary.Gauges, func(i, j int) bool { + return summary.Gauges[i].Hash < summary.Gauges[j].Hash + }) + + summary.Counters = formatSamples(interval.Counters) + summary.Samples = formatSamples(interval.Samples) + + return summary, nil +} + +func formatSamples(source map[string]SampledValue) []SampledValue { + output := make([]SampledValue, 0, len(source)) + for hash, sample := range source { + displayLabels := make(map[string]string) + for _, label := range sample.Labels { + displayLabels[label.Name] = label.Value + } + + output = append(output, SampledValue{ + Name: sample.Name, + Hash: hash, + AggregateSample: sample.AggregateSample, + Mean: sample.AggregateSample.Mean(), + Stddev: sample.AggregateSample.Stddev(), + DisplayLabels: displayLabels, + }) + } + sort.Slice(output, func(i, j int) bool { + return output[i].Hash < output[j].Hash + }) + + return output +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 0000000..0937f4a --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. 
Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for j := 0; j < len(data)-1; j++ { + intv := data[j] + intv.RLock() + for _, val := range intv.Gauges { + name := i.flattenLabels(val.Name, val.Labels) + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for _, agg := range intv.Counters { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + for _, agg := range intv.Samples { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSignal) flattenLabels(name string, labels []Label) string { + buf := bytes.NewBufferString(name) + replacer := strings.NewReplacer(" ", "_", ":", "_") + + for _, label := range labels { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, label.Value) + } + + return buf.String() +} diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go new file mode 100644 index 0000000..cf9def7 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,278 @@ +package metrics + +import ( + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +type Label struct { + Name string + Value string +} + +func (m *Metrics) SetGauge(key []string, val float32) { + m.SetGaugeWithLabels(key, val, nil) +} + +func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" { + if m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } else if m.EnableHostname { + key = insert(0, m.HostName, key) + } + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.SetGaugeWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + allowed, _ := m.allowMetric(key, nil) + if !allowed { + return + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + m.IncrCounterWithLabels(key, val, nil) +} + +func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels 
[]Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.IncrCounterWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) AddSample(key []string, val float32) { + m.AddSampleWithLabels(key, val, nil) +} + +func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.AddSampleWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + m.MeasureSinceWithLabels(key, start, nil) +} + +func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSampleWithLabels(key, msec, labelsFiltered) +} + +// UpdateFilter overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilter(allow, block []string) { + m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) +} + +// UpdateFilterAndLabels overwrites the existing filter with the given rules. 
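A small sketch of the prefix side of this filtering, driven through UpdateFilter (which keeps the configured label filters); the prefixes and keys are illustrative, and an empty service name is assumed so emitted keys are not rewritten:

```go
m, _ := metrics.New(metrics.DefaultConfig(""), &metrics.BlackholeSink{})

// Replace the allow/block prefix lists at runtime.
m.UpdateFilter(
	[]string{"api."},   // allowed prefixes
	[]string{"debug."}, // blocked prefixes
)

m.IncrCounter([]string{"api", "requests"}, 1) // key "api.requests" matches "api." -> emitted
m.IncrCounter([]string{"debug", "trace"}, 1)  // key "debug.trace" matches "debug." -> dropped
```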
+func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + m.filterLock.Lock() + defer m.filterLock.Unlock() + + m.AllowedPrefixes = allow + m.BlockedPrefixes = block + + if allowedLabels == nil { + // Having a white list means we take only elements from it + m.allowedLabels = nil + } else { + m.allowedLabels = make(map[string]bool) + for _, v := range allowedLabels { + m.allowedLabels[v] = true + } + } + m.blockedLabels = make(map[string]bool) + for _, v := range blockedLabels { + m.blockedLabels[v] = true + } + m.AllowedLabels = allowedLabels + m.BlockedLabels = blockedLabels + + m.filter = iradix.New() + for _, prefix := range m.AllowedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), true) + } + for _, prefix := range m.BlockedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), false) + } +} + +// labelIsAllowed return true if a should be included in metric +// the caller should lock m.filterLock while calling this method +func (m *Metrics) labelIsAllowed(label *Label) bool { + labelName := (*label).Name + if m.blockedLabels != nil { + _, ok := m.blockedLabels[labelName] + if ok { + // If present, let's remove this label + return false + } + } + if m.allowedLabels != nil { + _, ok := m.allowedLabels[labelName] + return ok + } + // Allow by default + return true +} + +// filterLabels return only allowed labels +// the caller should lock m.filterLock while calling this method +func (m *Metrics) filterLabels(labels []Label) []Label { + if labels == nil { + return nil + } + toReturn := labels[:0] + for _, label := range labels { + if m.labelIsAllowed(&label) { + toReturn = append(toReturn, label) + } + } + return toReturn +} + +// Returns whether the metric should be allowed based on configured prefix filters +// Also return the applicable labels +func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { + m.filterLock.RLock() + defer m.filterLock.RUnlock() + + if m.filter == nil || m.filter.Len() == 0 { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) + if !ok { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + return allowed.(bool), m.filterLabels(labels) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.emitRuntimeStats() + } +} + +// Emits various runtime statsitics +func (m *Metrics) emitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; 
i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Inserts a string value at an index into the slice +func insert(i int, v string, s []string) []string { + s = append(s, "") + copy(s[i+1:], s[i:]) + s[i] = v + return s +} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go new file mode 100644 index 0000000..0b7d6e4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "fmt" + "net/url" +) + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + SetGaugeWithLabels(key []string, val float32, labels []Label) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + IncrCounterWithLabels(key []string, val float32, labels []Label) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) + AddSampleWithLabels(key []string, val float32, labels []Label) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} +func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} + +// FanoutSink is used to sink to fanout values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + fh.SetGaugeWithLabels(key, val, nil) +} + +func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.SetGaugeWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + fh.IncrCounterWithLabels(key, val, nil) +} + +func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.IncrCounterWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + fh.AddSampleWithLabels(key, val, nil) +} + +func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.AddSampleWithLabels(key, val, labels) + } +} + +// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided +// by each sink type +type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) + +// sinkRegistry supports the generic NewMetricSink function by mapping URL +// schemes to metric sink factory functions +var sinkRegistry = map[string]sinkURLFactoryFunc{ + "statsd": NewStatsdSinkFromURL, + "statsite": NewStatsiteSinkFromURL, + "inmem": NewInmemSinkFromURL, +} + +// NewMetricSinkFromURL allows a generic URL input to configure any of the +// supported sinks. 
The scheme of the URL identifies the type of the sink, the +// and query parameters are used to set options. +// +// "statsd://" - Initializes a StatsdSink. The host and port are passed through +// as the "addr" of the sink +// +// "statsite://" - Initializes a StatsiteSink. The host and port become the +// "addr" of the sink +// +// "inmem://" - Initializes an InmemSink. The host and port are ignored. The +// "interval" and "duration" query parameters must be specified with valid +// durations, see NewInmemSink for details. +func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + sinkURLFactoryFunc := sinkRegistry[u.Scheme] + if sinkURLFactoryFunc == nil { + return nil, fmt.Errorf( + "cannot create metric sink, unrecognized sink name: %q", u.Scheme) + } + + return sinkURLFactoryFunc(u) +} diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go new file mode 100644 index 0000000..32a28c4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/start.go @@ -0,0 +1,141 @@ +package metrics + +import ( + "os" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableHostnameLabel bool // Enable adding hostname to labels + EnableServiceLabel bool // Enable adding service to labels + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics + + AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator + BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator + AllowedLabels []string // A list of metric labels to allow, with '.' as the separator + BlockedLabels []string // A list of metric labels to block, with '.' 
as the separator + FilterDefault bool // Whether to allow metrics by default +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink + filter *iradix.Tree + allowedLabels map[string]bool + blockedLabels map[string]bool + filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access +} + +// Shared global metrics instance +var globalMetrics atomic.Value // *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + FilterDefault: true, // Don't filter metrics by default + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. +func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics.Store(metrics) + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.Load().(*Metrics).SetGauge(key, val) +} + +func SetGaugeWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) +} + +func EmitKey(key []string, val float32) { + globalMetrics.Load().(*Metrics).EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.Load().(*Metrics).IncrCounter(key, val) +} + +func IncrCounterWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) +} + +func AddSample(key []string, val float32) { + globalMetrics.Load().(*Metrics).AddSample(key, val) +} + +func AddSampleWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.Load().(*Metrics).MeasureSince(key, start) +} + +func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) +} + +func UpdateFilter(allow, block []string) { + globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) +} + +// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels +// and blockedLabels - when not nil - allow filtering of labels in order to +// block/allow globally labels (especially useful when having large number of +// values for a given label). 
See README.md for more information about usage. +func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) +} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go new file mode 100644 index 0000000..1bfffce --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,184 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP. +type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsdSink(u.Host) +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // 
Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go new file mode 100644 index 0000000..6c0d284 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,172 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. + flushInterval = 100 * time.Millisecond +) + +// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsiteSink(u.Host) +} + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + 
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml new file mode 100644 index 0000000..0c2c02b --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/.travis.yml @@ -0,0 +1,9 @@ +language: go +sudo: false + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md new file mode 100644 index 0000000..4ba6a8c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ChangeLog.md @@ -0,0 +1,49 @@ +# Version 1.x.x + +* **Add more test cases and reference new test COM server project.** (Placeholder for future additions) + +# Version 1.2.0-alphaX + +**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.** + + * Added CI configuration for Travis-CI and AppVeyor. + * Added test InterfaceID and ClassID for the COM Test Server project. + * Added more inline documentation (#83). + * Added IEnumVARIANT implementation (#88). + * Added IEnumVARIANT test cases (#99, #100, #101). + * Added support for retrieving `time.Time` from VARIANT (#92). + * Added test case for IUnknown (#64). + * Added test case for IDispatch (#64). + * Added test cases for scalar variants (#64, #76). + +# Version 1.1.1 + + * Fixes for Linux build. + * Fixes for Windows build. + +# Version 1.1.0 + +The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes. 
+ + * Move GUID out of variables.go into its own file to make new documentation available. + * Move OleError out of ole.go into its own file to make new documentation available. + * Add documentation to utility functions. + * Add documentation to variant receiver functions. + * Add documentation to ole structures. + * Make variant available to other systems outside of Windows. + * Make OLE structures available to other systems outside of Windows. + +## New Features + + * Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows. + * More functions are now documented and available on godoc.org. + +# Version 1.0.1 + + 1. Fix package references from repository location change. + +# Version 1.0.0 + +This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface. + +There is no changelog for this version. Check commits for history. diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE new file mode 100644 index 0000000..623ec06 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright © 2013-2017 Yasuhiro Matsumoto, + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md new file mode 100644 index 0000000..7b57755 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/README.md @@ -0,0 +1,46 @@ +# Go OLE + +[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) +[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) +[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole) + +Go bindings for Windows COM using shared libraries instead of cgo. + +By Yasuhiro Matsumoto. + +## Install + +To experiment with go-ole, you can just compile and run the example program: + +``` +go get github.com/go-ole/go-ole +cd /path/to/go-ole/ +go test + +cd /path/to/go-ole/example/excel +go run excel.go +``` + +## Continuous Integration + +Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run. 
+ +**Travis-CI** + +Travis-CI was added to check builds on Linux to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future. It is also not currently possible to test the library on Linux, since COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server. + +**AppVeyor** + +AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server. + +The tests currently do run and do pass and this should be maintained with commits. + +## Versioning + +Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. Which means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes. Fixes will always be in patch. + +This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. Leave a ticket, if there is breakage, so that it could be fixed. + +## LICENSE + +Under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml new file mode 100644 index 0000000..0d557ac --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/appveyor.yml @@ -0,0 +1,54 @@ +# Notes: +# - Minimal appveyor.yml file is an empty file. All sections are optional. +# - Indent each level of configuration with 2 spaces. Do not use tabs! +# - All section names are case-sensitive. +# - Section names should be unique on each level. + +version: "1.3.0.{build}-alpha-{branch}" + +os: Windows Server 2012 R2 + +branches: + only: + - master + - v1.2 + - v1.1 + - v1.0 + +skip_tags: true + +clone_folder: c:\gopath\src\github.com\go-ole\go-ole + +environment: + GOPATH: c:\gopath + matrix: + - GOARCH: amd64 + GOVERSION: 1.5 + GOROOT: c:\go + DOWNLOADPLATFORM: "x64" + +install: + - choco install mingw + - SET PATH=c:\tools\mingw64\bin;%PATH% + # - Download COM Server + - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" + - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL + - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat + # - set + - go version + - go env + - go get -u golang.org/x/tools/cmd/cover + - go get -u golang.org/x/tools/cmd/godoc + - go get -u golang.org/x/tools/cmd/stringer + +build_script: + - cd c:\gopath\src\github.com\go-ole\go-ole + - go get -v -t ./... + - go build + - go test -v -cover ./... 
+ +# disable automatic tests +test: off + +# disable deployment +deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go new file mode 100644 index 0000000..6f986b1 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -0,0 +1,344 @@ +// +build windows + +package ole + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +var ( + procCoInitialize, _ = modole32.FindProc("CoInitialize") + procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx") + procCoUninitialize, _ = modole32.FindProc("CoUninitialize") + procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance") + procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree") + procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID") + procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString") + procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID") + procStringFromIID, _ = modole32.FindProc("StringFromIID") + procIIDFromString, _ = modole32.FindProc("IIDFromString") + procCoGetObject, _ = modole32.FindProc("CoGetObject") + procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID") + procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory") + procVariantInit, _ = modoleaut32.FindProc("VariantInit") + procVariantClear, _ = modoleaut32.FindProc("VariantClear") + procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime") + procSysAllocString, _ = modoleaut32.FindProc("SysAllocString") + procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen") + procSysFreeString, _ = modoleaut32.FindProc("SysFreeString") + procSysStringLen, _ = modoleaut32.FindProc("SysStringLen") + procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo") + procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch") + procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject") + + procGetMessageW, _ = moduser32.FindProc("GetMessageW") + procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW") +) + +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func coInitialize() (err error) { + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx + // Suggests that no value should be passed to CoInitialized. + // Could just be Call() since the parameter is optional. <-- Needs testing to be sure. + hr, _, _ := procCoInitialize.Call(uintptr(0)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// coInitializeEx initializes COM library with concurrency model. +func coInitializeEx(coinit uint32) (err error) { + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx + // Suggests that the first parameter is not only optional but should always be NULL. + hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// CoInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. 
+// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func CoInitialize(p uintptr) (err error) { + // p is ignored and won't be used. + // Avoid any variable not used errors. + p = uintptr(0) + return coInitialize() +} + +// CoInitializeEx initializes COM library with concurrency model. +func CoInitializeEx(p uintptr, coinit uint32) (err error) { + // Avoid any variable not used errors. + p = uintptr(0) + return coInitializeEx(coinit) +} + +// CoUninitialize uninitializes COM Library. +func CoUninitialize() { + procCoUninitialize.Call() +} + +// CoTaskMemFree frees memory pointer. +func CoTaskMemFree(memptr uintptr) { + procCoTaskMemFree.Call(memptr) +} + +// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. +// +// The Programmatic Identifier must be registered, because it will be looked up +// in the Windows Registry. The registry entry has the following keys: CLSID, +// Insertable, Protocol and Shell +// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). +// +// programID identifies the class id with less precision and is not guaranteed +// to be unique. These are usually found in the registry under +// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of +// "Program.Component.Version" with version being optional. +// +// CLSIDFromProgID in Windows API. +func CLSIDFromProgID(progId string) (clsid *GUID, err error) { + var guid GUID + lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) + hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// CLSIDFromString retrieves Class ID from string representation. +// +// This is technically the string version of the GUID and will convert the +// string to object. +// +// CLSIDFromString in Windows API. +func CLSIDFromString(str string) (clsid *GUID, err error) { + var guid GUID + lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str))) + hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// StringFromCLSID returns GUID formated string from GUID object. +func StringFromCLSID(clsid *GUID) (str string, err error) { + var p *uint16 + hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p))) + if hr != 0 { + err = NewError(hr) + } + str = LpOleStrToString(p) + return +} + +// IIDFromString returns GUID from program ID. +func IIDFromString(progId string) (clsid *GUID, err error) { + var guid GUID + lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) + hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// StringFromIID returns GUID formatted string from GUID object. +func StringFromIID(iid *GUID) (str string, err error) { + var p *uint16 + hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p))) + if hr != 0 { + err = NewError(hr) + } + str = LpOleStrToString(p) + return +} + +// CreateInstance of single uninitialized object with GUID. 
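A minimal sketch of the usual call sequence on Windows, using only the helpers defined in this file plus QueryInterface/Release as used elsewhere in the package; the ProgID "Excel.Application" is hypothetical and must be registered on the machine.

```go
package main

import (
	"log"

	ole "github.com/go-ole/go-ole"
)

func main() {
	// Enter a single-threaded apartment; the parameter is ignored.
	if err := ole.CoInitialize(0); err != nil {
		log.Fatal(err)
	}
	defer ole.CoUninitialize()

	// Resolve the ProgID to a CLSID via the registry.
	clsid, err := ole.CLSIDFromProgID("Excel.Application")
	if err != nil {
		log.Fatal(err)
	}

	// Create the object, then ask it for its IDispatch interface.
	unknown, err := ole.CreateInstance(clsid, ole.IID_IUnknown)
	if err != nil {
		log.Fatal(err)
	}
	defer unknown.Release()

	dispatch, err := unknown.QueryInterface(ole.IID_IDispatch)
	if err != nil {
		log.Fatal(err)
	}
	defer dispatch.Release()
}
```

On non-Windows builds these entry points are stubs (see com_func.go later in this change), so the first call already returns an error there.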
+func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procCoCreateInstance.Call( + uintptr(unsafe.Pointer(clsid)), + 0, + CLSCTX_SERVER, + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// GetActiveObject retrieves pointer to active object. +func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procGetActiveObject.Call( + uintptr(unsafe.Pointer(clsid)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +type BindOpts struct { + CbStruct uint32 + GrfFlags uint32 + GrfMode uint32 + TickCountDeadline uint32 +} + +// GetObject retrieves pointer to active object. +func GetObject(programID string, bindOpts *BindOpts, iid *GUID) (unk *IUnknown, err error) { + if bindOpts != nil { + bindOpts.CbStruct = uint32(unsafe.Sizeof(BindOpts{})) + } + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procCoGetObject.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(programID))), + uintptr(unsafe.Pointer(bindOpts)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// VariantInit initializes variant. +func VariantInit(v *VARIANT) (err error) { + hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// VariantClear clears value in Variant settings to VT_EMPTY. +func VariantClear(v *VARIANT) (err error) { + hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// SysAllocString allocates memory for string and copies string into memory. +func SysAllocString(v string) (ss *int16) { + pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v)))) + ss = (*int16)(unsafe.Pointer(pss)) + return +} + +// SysAllocStringLen copies up to length of given string returning pointer. +func SysAllocStringLen(v string) (ss *int16) { + utf16 := utf16.Encode([]rune(v + "\x00")) + ptr := &utf16[0] + + pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1)) + ss = (*int16)(unsafe.Pointer(pss)) + return +} + +// SysFreeString frees string system memory. This must be called with SysAllocString. +func SysFreeString(v *int16) (err error) { + hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// SysStringLen is the length of the system allocated string. +func SysStringLen(v *int16) uint32 { + l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v))) + return uint32(l) +} + +// CreateStdDispatch provides default IDispatch implementation for IUnknown. +// +// This handles default IDispatch implementation for objects. It haves a few +// limitations with only supporting one language. It will also only return +// default exception codes. +func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) { + hr, _, _ := procCreateStdDispatch.Call( + uintptr(unsafe.Pointer(unk)), + v, + uintptr(unsafe.Pointer(ptinfo)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. +// +// This will not handle the full implementation of the interface. 
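A small sketch of pairing SysAllocString with SysFreeString, as the comment on SysFreeString above requires; the string value is arbitrary, and on non-Windows builds these helpers are no-op stubs.

```go
package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	// Allocate a BSTR copy of the Go string.
	bstr := ole.SysAllocString("hello world")

	// Length as reported by the system allocator (0 on the non-Windows stubs).
	fmt.Println("length:", ole.SysStringLen(bstr))

	// Always release what SysAllocString handed out.
	if err := ole.SysFreeString(bstr); err != nil {
		fmt.Println("free failed:", err)
	}
}
```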
+func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) { + hr, _, _ := procCreateDispTypeInfo.Call( + uintptr(unsafe.Pointer(idata)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&pptinfo))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// copyMemory moves location of a block of memory. +func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) { + procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length)) +} + +// GetUserDefaultLCID retrieves current user default locale. +func GetUserDefaultLCID() (lcid uint32) { + ret, _, _ := procGetUserDefaultLCID.Call() + lcid = uint32(ret) + return +} + +// GetMessage in message queue from runtime. +// +// This function appears to block. PeekMessage does not block. +func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) { + r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax)) + ret = int32(r0) + return +} + +// DispatchMessage to window procedure. +func DispatchMessage(msg *Msg) (ret int32) { + r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg))) + ret = int32(r0) + return +} diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go new file mode 100644 index 0000000..cef539d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/com_func.go @@ -0,0 +1,174 @@ +// +build !windows + +package ole + +import ( + "time" + "unsafe" +) + +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func coInitialize() error { + return NewError(E_NOTIMPL) +} + +// coInitializeEx initializes COM library with concurrency model. +func coInitializeEx(coinit uint32) error { + return NewError(E_NOTIMPL) +} + +// CoInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func CoInitialize(p uintptr) error { + return NewError(E_NOTIMPL) +} + +// CoInitializeEx initializes COM library with concurrency model. +func CoInitializeEx(p uintptr, coinit uint32) error { + return NewError(E_NOTIMPL) +} + +// CoUninitialize uninitializes COM Library. +func CoUninitialize() {} + +// CoTaskMemFree frees memory pointer. +func CoTaskMemFree(memptr uintptr) {} + +// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. +// +// The Programmatic Identifier must be registered, because it will be looked up +// in the Windows Registry. The registry entry has the following keys: CLSID, +// Insertable, Protocol and Shell +// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). +// +// programID identifies the class id with less precision and is not guaranteed +// to be unique. 
These are usually found in the registry under +// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of +// "Program.Component.Version" with version being optional. +// +// CLSIDFromProgID in Windows API. +func CLSIDFromProgID(progId string) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// CLSIDFromString retrieves Class ID from string representation. +// +// This is technically the string version of the GUID and will convert the +// string to object. +// +// CLSIDFromString in Windows API. +func CLSIDFromString(str string) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// StringFromCLSID returns GUID formated string from GUID object. +func StringFromCLSID(clsid *GUID) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// IIDFromString returns GUID from program ID. +func IIDFromString(progId string) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// StringFromIID returns GUID formatted string from GUID object. +func StringFromIID(iid *GUID) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// CreateInstance of single uninitialized object with GUID. +func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) { + return nil, NewError(E_NOTIMPL) +} + +// GetActiveObject retrieves pointer to active object. +func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) { + return nil, NewError(E_NOTIMPL) +} + +// VariantInit initializes variant. +func VariantInit(v *VARIANT) error { + return NewError(E_NOTIMPL) +} + +// VariantClear clears value in Variant settings to VT_EMPTY. +func VariantClear(v *VARIANT) error { + return NewError(E_NOTIMPL) +} + +// SysAllocString allocates memory for string and copies string into memory. +func SysAllocString(v string) *int16 { + u := int16(0) + return &u +} + +// SysAllocStringLen copies up to length of given string returning pointer. +func SysAllocStringLen(v string) *int16 { + u := int16(0) + return &u +} + +// SysFreeString frees string system memory. This must be called with SysAllocString. +func SysFreeString(v *int16) error { + return NewError(E_NOTIMPL) +} + +// SysStringLen is the length of the system allocated string. +func SysStringLen(v *int16) uint32 { + return uint32(0) +} + +// CreateStdDispatch provides default IDispatch implementation for IUnknown. +// +// This handles default IDispatch implementation for objects. It haves a few +// limitations with only supporting one language. It will also only return +// default exception codes. +func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) { + return nil, NewError(E_NOTIMPL) +} + +// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. +// +// This will not handle the full implementation of the interface. +func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) { + return nil, NewError(E_NOTIMPL) +} + +// copyMemory moves location of a block of memory. +func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {} + +// GetUserDefaultLCID retrieves current user default locale. +func GetUserDefaultLCID() uint32 { + return uint32(0) +} + +// GetMessage in message queue from runtime. +// +// This function appears to block. PeekMessage does not block. +func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// DispatchMessage to window procedure. 
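A brief sketch of what a caller sees on a non-Windows build, where every entry point above is a stub returning E_NOTIMPL; the check uses only OleError.Code and the E_NOTIMPL constant defined in this package.

```go
package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	err := ole.CoInitialize(0)
	if err == nil {
		// Real COM runtime (Windows): balance the call before exiting.
		defer ole.CoUninitialize()
		fmt.Println("COM initialized")
		return
	}
	if oleErr, ok := err.(*ole.OleError); ok && oleErr.Code() == ole.E_NOTIMPL {
		fmt.Println("COM stubs in use on this platform:", oleErr)
		return
	}
	fmt.Println("CoInitialize failed:", err)
}
```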
+func DispatchMessage(msg *Msg) int32 { + return int32(0) +} + +func GetVariantDate(value uint64) (time.Time, error) { + return time.Now(), NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go new file mode 100644 index 0000000..b2ac2ec --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/connect.go @@ -0,0 +1,192 @@ +package ole + +// Connection contains IUnknown for fluent interface interaction. +// +// Deprecated. Use oleutil package instead. +type Connection struct { + Object *IUnknown // Access COM +} + +// Initialize COM. +func (*Connection) Initialize() (err error) { + return coInitialize() +} + +// Uninitialize COM. +func (*Connection) Uninitialize() { + CoUninitialize() +} + +// Create IUnknown object based first on ProgId and then from String. +func (c *Connection) Create(progId string) (err error) { + var clsid *GUID + clsid, err = CLSIDFromProgID(progId) + if err != nil { + clsid, err = CLSIDFromString(progId) + if err != nil { + return + } + } + + unknown, err := CreateInstance(clsid, IID_IUnknown) + if err != nil { + return + } + c.Object = unknown + + return +} + +// Release IUnknown object. +func (c *Connection) Release() { + c.Object.Release() +} + +// Load COM object from list of programIDs or strings. +func (c *Connection) Load(names ...string) (errors []error) { + var tempErrors []error = make([]error, len(names)) + var numErrors int = 0 + for _, name := range names { + err := c.Create(name) + if err != nil { + tempErrors = append(tempErrors, err) + numErrors += 1 + continue + } + break + } + + copy(errors, tempErrors[0:numErrors]) + return +} + +// Dispatch returns Dispatch object. +func (c *Connection) Dispatch() (object *Dispatch, err error) { + dispatch, err := c.Object.QueryInterface(IID_IDispatch) + if err != nil { + return + } + object = &Dispatch{dispatch} + return +} + +// Dispatch stores IDispatch object. +type Dispatch struct { + Object *IDispatch // Dispatch object. +} + +// Call method on IDispatch with parameters. +func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(method) + if err != nil { + return + } + + result, err = d.Invoke(id, DISPATCH_METHOD, params) + return +} + +// MustCall method on IDispatch with parameters. +func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(method) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_METHOD, params) + if err != nil { + panic(err) + } + + return +} + +// Get property on IDispatch with parameters. +func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(name) + if err != nil { + return + } + result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) + return +} + +// MustGet property on IDispatch with parameters. +func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(name) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) + if err != nil { + panic(err) + } + return +} + +// Set property on IDispatch with parameters. +func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(name) + if err != nil { + return + } + result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) + return +} + +// MustSet property on IDispatch with parameters. 
+func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(name) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) + if err != nil { + panic(err) + } + return +} + +// GetId retrieves ID of name on IDispatch. +func (d *Dispatch) GetId(name string) (id int32, err error) { + var dispid []int32 + dispid, err = d.Object.GetIDsOfName([]string{name}) + if err != nil { + return + } + id = dispid[0] + return +} + +// GetIds retrieves all IDs of names on IDispatch. +func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) { + dispid, err = d.Object.GetIDsOfName(names) + return +} + +// Invoke IDispatch on DisplayID of dispatch type with parameters. +// +// There have been problems where if send cascading params..., it would error +// out because the parameters would be empty. +func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) { + if len(params) < 1 { + result, err = d.Object.Invoke(id, dispatch) + } else { + result, err = d.Object.Invoke(id, dispatch, params...) + } + return +} + +// Release IDispatch object. +func (d *Dispatch) Release() { + d.Object.Release() +} + +// Connect initializes COM and attempts to load IUnknown based on given names. +func Connect(names ...string) (connection *Connection) { + connection.Initialize() + connection.Load(names...) + return +} diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go new file mode 100644 index 0000000..fd0c6d7 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/constants.go @@ -0,0 +1,153 @@ +package ole + +const ( + CLSCTX_INPROC_SERVER = 1 + CLSCTX_INPROC_HANDLER = 2 + CLSCTX_LOCAL_SERVER = 4 + CLSCTX_INPROC_SERVER16 = 8 + CLSCTX_REMOTE_SERVER = 16 + CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER + CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER + CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER +) + +const ( + COINIT_APARTMENTTHREADED = 0x2 + COINIT_MULTITHREADED = 0x0 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +const ( + DISPATCH_METHOD = 1 + DISPATCH_PROPERTYGET = 2 + DISPATCH_PROPERTYPUT = 4 + DISPATCH_PROPERTYPUTREF = 8 +) + +const ( + S_OK = 0x00000000 + E_UNEXPECTED = 0x8000FFFF + E_NOTIMPL = 0x80004001 + E_OUTOFMEMORY = 0x8007000E + E_INVALIDARG = 0x80070057 + E_NOINTERFACE = 0x80004002 + E_POINTER = 0x80004003 + E_HANDLE = 0x80070006 + E_ABORT = 0x80004004 + E_FAIL = 0x80004005 + E_ACCESSDENIED = 0x80070005 + E_PENDING = 0x8000000A + + CO_E_CLASSSTRING = 0x800401F3 +) + +const ( + CC_FASTCALL = iota + CC_CDECL + CC_MSCPASCAL + CC_PASCAL = CC_MSCPASCAL + CC_MACPASCAL + CC_STDCALL + CC_FPFASTCALL + CC_SYSCALL + CC_MPWCDECL + CC_MPWPASCAL + CC_MAX = CC_MPWPASCAL +) + +type VT uint16 + +const ( + VT_EMPTY VT = 0x0 + VT_NULL VT = 0x1 + VT_I2 VT = 0x2 + VT_I4 VT = 0x3 + VT_R4 VT = 0x4 + VT_R8 VT = 0x5 + VT_CY VT = 0x6 + VT_DATE VT = 0x7 + VT_BSTR VT = 0x8 + VT_DISPATCH VT = 0x9 + VT_ERROR VT = 0xa + VT_BOOL VT = 0xb + VT_VARIANT VT = 0xc + VT_UNKNOWN VT = 0xd + VT_DECIMAL VT = 0xe + VT_I1 VT = 0x10 + VT_UI1 VT = 0x11 + VT_UI2 VT = 0x12 + VT_UI4 VT = 0x13 + VT_I8 VT = 0x14 + VT_UI8 VT = 0x15 + VT_INT VT = 0x16 + VT_UINT VT = 0x17 + VT_VOID VT = 0x18 + VT_HRESULT VT = 0x19 + VT_PTR VT = 0x1a + VT_SAFEARRAY VT = 0x1b + VT_CARRAY VT = 0x1c + VT_USERDEFINED VT = 0x1d + VT_LPSTR VT = 0x1e + VT_LPWSTR VT = 0x1f + VT_RECORD VT = 0x24 + 
VT_INT_PTR VT = 0x25 + VT_UINT_PTR VT = 0x26 + VT_FILETIME VT = 0x40 + VT_BLOB VT = 0x41 + VT_STREAM VT = 0x42 + VT_STORAGE VT = 0x43 + VT_STREAMED_OBJECT VT = 0x44 + VT_STORED_OBJECT VT = 0x45 + VT_BLOB_OBJECT VT = 0x46 + VT_CF VT = 0x47 + VT_CLSID VT = 0x48 + VT_BSTR_BLOB VT = 0xfff + VT_VECTOR VT = 0x1000 + VT_ARRAY VT = 0x2000 + VT_BYREF VT = 0x4000 + VT_RESERVED VT = 0x8000 + VT_ILLEGAL VT = 0xffff + VT_ILLEGALMASKED VT = 0xfff + VT_TYPEMASK VT = 0xfff +) + +const ( + DISPID_UNKNOWN = -1 + DISPID_VALUE = 0 + DISPID_PROPERTYPUT = -3 + DISPID_NEWENUM = -4 + DISPID_EVALUATE = -5 + DISPID_CONSTRUCTOR = -6 + DISPID_DESTRUCTOR = -7 + DISPID_COLLECT = -8 +) + +const ( + TKIND_ENUM = 1 + TKIND_RECORD = 2 + TKIND_MODULE = 3 + TKIND_INTERFACE = 4 + TKIND_DISPATCH = 5 + TKIND_COCLASS = 6 + TKIND_ALIAS = 7 + TKIND_UNION = 8 + TKIND_MAX = 9 +) + +// Safe Array Feature Flags + +const ( + FADF_AUTO = 0x0001 + FADF_STATIC = 0x0002 + FADF_EMBEDDED = 0x0004 + FADF_FIXEDSIZE = 0x0010 + FADF_RECORD = 0x0020 + FADF_HAVEIID = 0x0040 + FADF_HAVEVARTYPE = 0x0080 + FADF_BSTR = 0x0100 + FADF_UNKNOWN = 0x0200 + FADF_DISPATCH = 0x0400 + FADF_VARIANT = 0x0800 + FADF_RESERVED = 0xF008 +) diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go new file mode 100644 index 0000000..096b456 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error.go @@ -0,0 +1,51 @@ +package ole + +// OleError stores COM errors. +type OleError struct { + hr uintptr + description string + subError error +} + +// NewError creates new error with HResult. +func NewError(hr uintptr) *OleError { + return &OleError{hr: hr} +} + +// NewErrorWithDescription creates new COM error with HResult and description. +func NewErrorWithDescription(hr uintptr, description string) *OleError { + return &OleError{hr: hr, description: description} +} + +// NewErrorWithSubError creates new COM error with parent error. +func NewErrorWithSubError(hr uintptr, description string, err error) *OleError { + return &OleError{hr: hr, description: description, subError: err} +} + +// Code is the HResult. +func (v *OleError) Code() uintptr { + return uintptr(v.hr) +} + +// String description, either manually set or format message with error code. +func (v *OleError) String() string { + if v.description != "" { + return errstr(int(v.hr)) + " (" + v.description + ")" + } + return errstr(int(v.hr)) +} + +// Error implements error interface. +func (v *OleError) Error() string { + return v.String() +} + +// Description retrieves error summary, if there is one. +func (v *OleError) Description() string { + return v.description +} + +// SubError returns parent error, if there is one. +func (v *OleError) SubError() error { + return v.subError +} diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go new file mode 100644 index 0000000..8a2ffaa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_func.go @@ -0,0 +1,8 @@ +// +build !windows + +package ole + +// errstr converts error code to string. +func errstr(errno int) string { + return "" +} diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go new file mode 100644 index 0000000..d0e8e68 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_windows.go @@ -0,0 +1,24 @@ +// +build windows + +package ole + +import ( + "fmt" + "syscall" + "unicode/utf16" +) + +// errstr converts error code to string. 
+func errstr(errno int) string { + // ask windows for the remaining errors + var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS + b := make([]uint16, 300) + n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil) + if err != nil { + return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} diff --git a/vendor/github.com/go-ole/go-ole/go.mod b/vendor/github.com/go-ole/go-ole/go.mod new file mode 100644 index 0000000..645d3b7 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/go.mod @@ -0,0 +1,5 @@ +module github.com/go-ole/go-ole + +go 1.12 + +require github.com/go-ole/go-ole v1.2.2 diff --git a/vendor/github.com/go-ole/go-ole/go.sum b/vendor/github.com/go-ole/go-ole/go.sum new file mode 100644 index 0000000..6a8d3b0 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/go.sum @@ -0,0 +1,2 @@ +github.com/go-ole/go-ole v1.2.2 h1:HXmymm3IQ8iAfpqlbbUGLHd+SZrnmI4y1pv+WL/3R7c= +github.com/go-ole/go-ole v1.2.2/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go new file mode 100644 index 0000000..8d20f68 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/guid.go @@ -0,0 +1,284 @@ +package ole + +var ( + // IID_NULL is null Interface ID, used when no other Interface ID is known. + IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}") + + // IID_IUnknown is for IUnknown interfaces. + IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}") + + // IID_IDispatch is for IDispatch interfaces. + IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}") + + // IID_IEnumVariant is for IEnumVariant interfaces + IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}") + + // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces. + IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}") + + // IID_IConnectionPoint is for IConnectionPoint interfaces. + IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}") + + // IID_IInspectable is for IInspectable interfaces. + IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}") + + // IID_IProvideClassInfo is for IProvideClassInfo interfaces. + IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}") +) + +// These are for testing and not part of any library. +var ( + // IID_ICOMTestString is for ICOMTestString interfaces. + // + // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED} + IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}") + + // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces. + // + // {BEB06610-EB84-4155-AF58-E2BFF53680B4} + IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}") + + // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces. + // + // {DAA3F9FA-761E-4976-A860-8364CE55F6FC} + IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}") + + // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces. + // + // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0} + IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}") + + // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces. + // + // {8D437CBC-B3ED-485C-BC32-C336432A1623} + IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}") + + // IID_ICOMTestFloat is for ICOMTestFloat interfaces. 
+ // + // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C} + IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}") + + // IID_ICOMTestDouble is for ICOMTestDouble interfaces. + // + // {BF908A81-8687-4E93-999F-D86FAB284BA0} + IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}") + + // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces. + // + // {D530E7A6-4EE8-40D1-8931-3D63B8605010} + IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}") + + // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. + // + // {6485B1EF-D780-4834-A4FE-1EBB51746CA3} + IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}") + + // IID_ICOMTestTypes is for ICOMTestTypes interfaces. + // + // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0} + IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}") + + // CLSID_COMEchoTestObject is for COMEchoTestObject class. + // + // {3C24506A-AE9E-4D50-9157-EF317281F1B0} + CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}") + + // CLSID_COMTestScalarClass is for COMTestScalarClass class. + // + // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86} + CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}") +) + +const hextable = "0123456789ABCDEF" +const emptyGUID = "{00000000-0000-0000-0000-000000000000}" + +// GUID is Windows API specific GUID type. +// +// This exists to match Windows GUID type for direct passing for COM. +// Format is in xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +// NewGUID converts the given string into a globally unique identifier that is +// compliant with the Windows API. +// +// The supplied string may be in any of these formats: +// +// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX +// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} +// +// The conversion of the supplied string is not case-sensitive. 
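A short sketch of the three accepted GUID spellings described above; the value used here is the IID_IDispatch GUID, so all three forms parse to the same identifier.

```go
package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	a := ole.NewGUID("{00020400-0000-0000-C000-000000000046}") // braced form
	b := ole.NewGUID("00020400-0000-0000-C000-000000000046")   // dashed form
	c := ole.NewGUID("0002040000000000C000000000000046")       // bare 32 hex digits

	// NewGUID returns nil on malformed input, so guard before use.
	if a == nil || b == nil || c == nil {
		panic("unexpected parse failure")
	}

	fmt.Println(a)                                     // {00020400-0000-0000-C000-000000000046}
	fmt.Println(ole.IsEqualGUID(a, b))                 // true
	fmt.Println(ole.IsEqualGUID(a, c))                 // true
	fmt.Println(ole.IsEqualGUID(a, ole.IID_IDispatch)) // true
}
```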
+func NewGUID(guid string) *GUID { + d := []byte(guid) + var d1, d2, d3, d4a, d4b []byte + + switch len(d) { + case 38: + if d[0] != '{' || d[37] != '}' { + return nil + } + d = d[1:37] + fallthrough + case 36: + if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' { + return nil + } + d1 = d[0:8] + d2 = d[9:13] + d3 = d[14:18] + d4a = d[19:23] + d4b = d[24:36] + case 32: + d1 = d[0:8] + d2 = d[8:12] + d3 = d[12:16] + d4a = d[16:20] + d4b = d[20:32] + default: + return nil + } + + var g GUID + var ok1, ok2, ok3, ok4 bool + g.Data1, ok1 = decodeHexUint32(d1) + g.Data2, ok2 = decodeHexUint16(d2) + g.Data3, ok3 = decodeHexUint16(d3) + g.Data4, ok4 = decodeHexByte64(d4a, d4b) + if ok1 && ok2 && ok3 && ok4 { + return &g + } + return nil +} + +func decodeHexUint32(src []byte) (value uint32, ok bool) { + var b1, b2, b3, b4 byte + var ok1, ok2, ok3, ok4 bool + b1, ok1 = decodeHexByte(src[0], src[1]) + b2, ok2 = decodeHexByte(src[2], src[3]) + b3, ok3 = decodeHexByte(src[4], src[5]) + b4, ok4 = decodeHexByte(src[6], src[7]) + value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4) + ok = ok1 && ok2 && ok3 && ok4 + return +} + +func decodeHexUint16(src []byte) (value uint16, ok bool) { + var b1, b2 byte + var ok1, ok2 bool + b1, ok1 = decodeHexByte(src[0], src[1]) + b2, ok2 = decodeHexByte(src[2], src[3]) + value = (uint16(b1) << 8) | uint16(b2) + ok = ok1 && ok2 + return +} + +func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) { + var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool + value[0], ok1 = decodeHexByte(s1[0], s1[1]) + value[1], ok2 = decodeHexByte(s1[2], s1[3]) + value[2], ok3 = decodeHexByte(s2[0], s2[1]) + value[3], ok4 = decodeHexByte(s2[2], s2[3]) + value[4], ok5 = decodeHexByte(s2[4], s2[5]) + value[5], ok6 = decodeHexByte(s2[6], s2[7]) + value[6], ok7 = decodeHexByte(s2[8], s2[9]) + value[7], ok8 = decodeHexByte(s2[10], s2[11]) + ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8 + return +} + +func decodeHexByte(c1, c2 byte) (value byte, ok bool) { + var n1, n2 byte + var ok1, ok2 bool + n1, ok1 = decodeHexChar(c1) + n2, ok2 = decodeHexChar(c2) + value = (n1 << 4) | n2 + ok = ok1 && ok2 + return +} + +func decodeHexChar(c byte) (byte, bool) { + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + + return 0, false +} + +// String converts the GUID to string form. 
It will adhere to this pattern: +// +// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} +// +// If the GUID is nil, the string representation of an empty GUID is returned: +// +// {00000000-0000-0000-0000-000000000000} +func (guid *GUID) String() string { + if guid == nil { + return emptyGUID + } + + var c [38]byte + c[0] = '{' + putUint32Hex(c[1:9], guid.Data1) + c[9] = '-' + putUint16Hex(c[10:14], guid.Data2) + c[14] = '-' + putUint16Hex(c[15:19], guid.Data3) + c[19] = '-' + putByteHex(c[20:24], guid.Data4[0:2]) + c[24] = '-' + putByteHex(c[25:37], guid.Data4[2:8]) + c[37] = '}' + return string(c[:]) +} + +func putUint32Hex(b []byte, v uint32) { + b[0] = hextable[byte(v>>24)>>4] + b[1] = hextable[byte(v>>24)&0x0f] + b[2] = hextable[byte(v>>16)>>4] + b[3] = hextable[byte(v>>16)&0x0f] + b[4] = hextable[byte(v>>8)>>4] + b[5] = hextable[byte(v>>8)&0x0f] + b[6] = hextable[byte(v)>>4] + b[7] = hextable[byte(v)&0x0f] +} + +func putUint16Hex(b []byte, v uint16) { + b[0] = hextable[byte(v>>8)>>4] + b[1] = hextable[byte(v>>8)&0x0f] + b[2] = hextable[byte(v)>>4] + b[3] = hextable[byte(v)&0x0f] +} + +func putByteHex(dst, src []byte) { + for i := 0; i < len(src); i++ { + dst[i*2] = hextable[src[i]>>4] + dst[i*2+1] = hextable[src[i]&0x0f] + } +} + +// IsEqualGUID compares two GUID. +// +// Not constant time comparison. +func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool { + return guid1.Data1 == guid2.Data1 && + guid1.Data2 == guid2.Data2 && + guid1.Data3 == guid2.Data3 && + guid1.Data4[0] == guid2.Data4[0] && + guid1.Data4[1] == guid2.Data4[1] && + guid1.Data4[2] == guid2.Data4[2] && + guid1.Data4[3] == guid2.Data4[3] && + guid1.Data4[4] == guid2.Data4[4] && + guid1.Data4[5] == guid2.Data4[5] && + guid1.Data4[6] == guid2.Data4[6] && + guid1.Data4[7] == guid2.Data4[7] +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go new file mode 100644 index 0000000..9e6c49f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go @@ -0,0 +1,20 @@ +package ole + +import "unsafe" + +type IConnectionPoint struct { + IUnknown +} + +type IConnectionPointVtbl struct { + IUnknownVtbl + GetConnectionInterface uintptr + GetConnectionPointContainer uintptr + Advise uintptr + Unadvise uintptr + EnumConnections uintptr +} + +func (v *IConnectionPoint) VTable() *IConnectionPointVtbl { + return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go new file mode 100644 index 0000000..5414dc3 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go @@ -0,0 +1,21 @@ +// +build !windows + +package ole + +import "unsafe" + +func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { + return int32(0) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go new file mode 100644 index 0000000..32bc183 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPoint) 
GetConnectionInterface(piid **GUID) int32 { + // XXX: This doesn't look like it does what it's supposed to + return release((*IUnknown)(unsafe.Pointer(v))) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Advise, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(unknown)), + uintptr(unsafe.Pointer(&cookie))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Unadvise, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(cookie), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go new file mode 100644 index 0000000..165860d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go @@ -0,0 +1,17 @@ +package ole + +import "unsafe" + +type IConnectionPointContainer struct { + IUnknown +} + +type IConnectionPointContainerVtbl struct { + IUnknownVtbl + EnumConnectionPoints uintptr + FindConnectionPoint uintptr +} + +func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl { + return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go new file mode 100644 index 0000000..5dfa42a --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go @@ -0,0 +1,11 @@ +// +build !windows + +package ole + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go new file mode 100644 index 0000000..ad30d79 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().FindConnectionPoint, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(point))) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go new file mode 100644 index 0000000..d4af124 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch.go @@ -0,0 +1,94 @@ +package ole + +import "unsafe" + +type IDispatch struct { + IUnknown +} + +type IDispatchVtbl struct { + IUnknownVtbl + GetTypeInfoCount uintptr + GetTypeInfo uintptr + GetIDsOfNames uintptr + Invoke uintptr +} + +func (v *IDispatch) VTable() *IDispatchVtbl { + return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) { + dispid, err = getIDsOfName(v, names) + return +} + +func (v 
*IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + result, err = invoke(v, dispid, dispatch, params...) + return +} + +func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) { + c, err = getTypeInfoCount(v) + return +} + +func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) { + tinfo, err = getTypeInfo(v) + return +} + +// GetSingleIDOfName is a helper that returns single display ID for IDispatch name. +// +// This replaces the common pattern of attempting to get a single name from the list of available +// IDs. It gives the first ID, if it is available. +func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) { + var displayIDs []int32 + displayIDs, err = v.GetIDsOfName([]string{name}) + if err != nil { + return + } + displayID = displayIDs[0] + return +} + +// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke. +// +// Accepts name and will attempt to retrieve Display ID to pass to Invoke. +// +// Passing params as an array is a workaround that could be fixed in later versions of Go that +// prevent passing empty params. During testing it was discovered that this is an acceptable way of +// getting around not being able to pass params normally. +func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) { + displayID, err := v.GetSingleIDOfName(name) + if err != nil { + return + } + + if len(params) < 1 { + result, err = v.Invoke(displayID, dispatch) + } else { + result, err = v.Invoke(displayID, dispatch, params...) + } + + return +} + +// CallMethod invokes named function with arguments on object. +func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params) +} + +// GetProperty retrieves the property with the name with the ability to pass arguments. +// +// Most of the time you will not need to pass arguments as most objects do not allow for this +// feature. Or at least, should not allow for this feature. Some servers don't follow best practices +// and this is provided for those edge cases. +func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params) +} + +// PutProperty attempts to mutate a property in the object. 
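+//
+// Illustrative sketch only (disp and the "Visible" property are hypothetical,
+// standing in for any automation object's IDispatch and one of its members):
+//
+//	if _, err := disp.PutProperty("Visible", true); err != nil {
+//		// inspect the returned COM error
+//	}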
+func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go new file mode 100644 index 0000000..b8fbbe3 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) { + return []int32{}, NewError(E_NOTIMPL) +} + +func getTypeInfoCount(disp *IDispatch) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) { + return nil, NewError(E_NOTIMPL) +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go new file mode 100644 index 0000000..6ec180b --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -0,0 +1,200 @@ +// +build windows + +package ole + +import ( + "math/big" + "syscall" + "time" + "unsafe" +) + +func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) { + wnames := make([]*uint16, len(names)) + for i := 0; i < len(names); i++ { + wnames[i] = syscall.StringToUTF16Ptr(names[i]) + } + dispid = make([]int32, len(names)) + namelen := uint32(len(names)) + hr, _, _ := syscall.Syscall6( + disp.VTable().GetIDsOfNames, + 6, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(unsafe.Pointer(&wnames[0])), + uintptr(namelen), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&dispid[0]))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfoCount(disp *IDispatch) (c uint32, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfoCount, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&c)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfo, + 3, + uintptr(unsafe.Pointer(disp)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&tinfo))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + var dispparams DISPPARAMS + + if dispatch&DISPATCH_PROPERTYPUT != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } + var vargs []VARIANT + if len(params) > 0 { + vargs = make([]VARIANT, len(params)) + for i, v := range params { + //n := len(params)-i-1 + n := len(params) - i - 1 + VariantInit(&vargs[n]) + switch vv := v.(type) { + case bool: + if vv { + vargs[n] = NewVariant(VT_BOOL, 0xffff) + } else { + vargs[n] = NewVariant(VT_BOOL, 0) + } + case *bool: + vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool))))) + case uint8: + vargs[n] = NewVariant(VT_I1, int64(v.(uint8))) + case *uint8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + case int8: + vargs[n] = 
NewVariant(VT_I1, int64(v.(int8))) + case *int8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + case int16: + vargs[n] = NewVariant(VT_I2, int64(v.(int16))) + case *int16: + vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16))))) + case uint16: + vargs[n] = NewVariant(VT_UI2, int64(v.(uint16))) + case *uint16: + vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16))))) + case int32: + vargs[n] = NewVariant(VT_I4, int64(v.(int32))) + case *int32: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32))))) + case uint32: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint32))) + case *uint32: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32))))) + case int64: + vargs[n] = NewVariant(VT_I8, int64(v.(int64))) + case *int64: + vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64))))) + case uint64: + vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64)))) + case *uint64: + vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64))))) + case int: + vargs[n] = NewVariant(VT_I4, int64(v.(int))) + case *int: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int))))) + case uint: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint))) + case *uint: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint))))) + case float32: + vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv))) + case *float32: + vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32))))) + case float64: + vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) + case *float64: + vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) + case *big.Int: + vargs[n] = NewVariant(VT_DECIMAL, v.(*big.Int).Int64()) + case string: + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) + case *string: + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string))))) + case time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s))))) + case *time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s)))) + case *IDispatch: + vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))) + case **IDispatch: + vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))) + case nil: + vargs[n] = NewVariant(VT_NULL, 0) + case *VARIANT: + vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))) + case []byte: + safeByteArray := safeArrayFromByteSlice(v.([]byte)) + vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + case []string: + safeByteArray := safeArrayFromStringSlice(v.([]string)) + vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + default: + panic("unknown type") + } + } + dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0])) + dispparams.cArgs = uint32(len(params)) + } + + result = new(VARIANT) + var excepInfo EXCEPINFO + VariantInit(result) + hr, _, _ := syscall.Syscall9( + disp.VTable().Invoke, + 9, + uintptr(unsafe.Pointer(disp)), + uintptr(dispid), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(GetUserDefaultLCID()), + 
uintptr(dispatch), + uintptr(unsafe.Pointer(&dispparams)), + uintptr(unsafe.Pointer(result)), + uintptr(unsafe.Pointer(&excepInfo)), + 0) + if hr != 0 { + err = NewErrorWithSubError(hr, BstrToString(excepInfo.bstrDescription), excepInfo) + } + for i, varg := range vargs { + n := len(params) - i - 1 + if varg.VT == VT_BSTR && varg.Val != 0 { + SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) + } + if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 { + *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val)))) + } + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go new file mode 100644 index 0000000..2433897 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant.go @@ -0,0 +1,19 @@ +package ole + +import "unsafe" + +type IEnumVARIANT struct { + IUnknown +} + +type IEnumVARIANTVtbl struct { + IUnknownVtbl + Next uintptr + Skip uintptr + Reset uintptr + Clone uintptr +} + +func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl { + return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go new file mode 100644 index 0000000..c148481 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) { + return nil, NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Reset() error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Skip(celt uint) error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) { + return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go new file mode 100644 index 0000000..4781f3b --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go @@ -0,0 +1,63 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Clone, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(unsafe.Pointer(&cloned)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Reset() (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Reset, + 1, + uintptr(unsafe.Pointer(enum)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Skip(celt uint) (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Skip, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) { + hr, _, _ := syscall.Syscall6( + enum.VTable().Next, + 4, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + uintptr(unsafe.Pointer(&array)), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go new file mode 100644 index 0000000..f4a19e2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable.go @@ -0,0 +1,18 @@ +package ole + +import "unsafe" + +type IInspectable struct { + IUnknown +} + +type IInspectableVtbl struct { + IUnknownVtbl + GetIIds uintptr + 
GetRuntimeClassName uintptr + GetTrustLevel uintptr +} + +func (v *IInspectable) VTable() *IInspectableVtbl { + return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go new file mode 100644 index 0000000..348829b --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_func.go @@ -0,0 +1,15 @@ +// +build !windows + +package ole + +func (v *IInspectable) GetIids() ([]*GUID, error) { + return []*GUID{}, NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetRuntimeClassName() (string, error) { + return "", NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetTrustLevel() (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go new file mode 100644 index 0000000..4519a4a --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go @@ -0,0 +1,72 @@ +// +build windows + +package ole + +import ( + "bytes" + "encoding/binary" + "reflect" + "syscall" + "unsafe" +) + +func (v *IInspectable) GetIids() (iids []*GUID, err error) { + var count uint32 + var array uintptr + hr, _, _ := syscall.Syscall( + v.VTable().GetIIds, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&count)), + uintptr(unsafe.Pointer(&array))) + if hr != 0 { + err = NewError(hr) + return + } + defer CoTaskMemFree(array) + + iids = make([]*GUID, count) + byteCount := count * uint32(unsafe.Sizeof(GUID{})) + slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)} + byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr)) + reader := bytes.NewReader(byteSlice) + for i := range iids { + guid := GUID{} + err = binary.Read(reader, binary.LittleEndian, &guid) + if err != nil { + return + } + iids[i] = &guid + } + return +} + +func (v *IInspectable) GetRuntimeClassName() (s string, err error) { + var hstring HString + hr, _, _ := syscall.Syscall( + v.VTable().GetRuntimeClassName, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&hstring)), + 0) + if hr != 0 { + err = NewError(hr) + return + } + s = hstring.String() + DeleteHString(hstring) + return +} + +func (v *IInspectable) GetTrustLevel() (level uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().GetTrustLevel, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&level)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go new file mode 100644 index 0000000..25f3a6f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go @@ -0,0 +1,21 @@ +package ole + +import "unsafe" + +type IProvideClassInfo struct { + IUnknown +} + +type IProvideClassInfoVtbl struct { + IUnknownVtbl + GetClassInfo uintptr +} + +func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl { + return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) { + cinfo, err = getClassInfo(v) + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go new file mode 100644 index 0000000..7e3cb63 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err 
error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go new file mode 100644 index 0000000..2ad0163 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetClassInfo, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&tinfo)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go new file mode 100644 index 0000000..dd3c5e2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo.go @@ -0,0 +1,34 @@ +package ole + +import "unsafe" + +type ITypeInfo struct { + IUnknown +} + +type ITypeInfoVtbl struct { + IUnknownVtbl + GetTypeAttr uintptr + GetTypeComp uintptr + GetFuncDesc uintptr + GetVarDesc uintptr + GetNames uintptr + GetRefTypeOfImplType uintptr + GetImplTypeFlags uintptr + GetIDsOfNames uintptr + Invoke uintptr + GetDocumentation uintptr + GetDllEntry uintptr + GetRefTypeInfo uintptr + AddressOfMember uintptr + CreateInstance uintptr + GetMops uintptr + GetContainingTypeLib uintptr + ReleaseTypeAttr uintptr + ReleaseFuncDesc uintptr + ReleaseVarDesc uintptr +} + +func (v *ITypeInfo) VTable() *ITypeInfoVtbl { + return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go new file mode 100644 index 0000000..8364a65 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go new file mode 100644 index 0000000..54782b3 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) { + hr, _, _ := syscall.Syscall( + uintptr(v.VTable().GetTypeAttr), + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&tattr)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go new file mode 100644 index 0000000..108f28e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown.go @@ -0,0 +1,57 @@ +package ole + +import "unsafe" + +type IUnknown struct { + RawVTable *interface{} +} + +type IUnknownVtbl struct { + QueryInterface uintptr + AddRef uintptr + Release uintptr +} + +type UnknownLike interface { + QueryInterface(iid *GUID) (disp *IDispatch, err error) + AddRef() int32 + Release() int32 +} + +func (v *IUnknown) VTable() *IUnknownVtbl { + return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { + return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj) +} + +func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) { + err = v.PutQueryInterface(interfaceID, &dispatch) + return +} + +func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) 
(enum *IEnumVARIANT, err error) { + err = v.PutQueryInterface(interfaceID, &enum) + return +} + +func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { + return queryInterface(v, iid) +} + +func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) { + unk, err := queryInterface(v, iid) + if err != nil { + panic(err) + } + return unk +} + +func (v *IUnknown) AddRef() int32 { + return addRef(v) +} + +func (v *IUnknown) Release() int32 { + return release(v) +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go new file mode 100644 index 0000000..d0a62cf --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + return NewError(E_NOTIMPL) +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + return nil, NewError(E_NOTIMPL) +} + +func addRef(unk *IUnknown) int32 { + return 0 +} + +func release(unk *IUnknown) int32 { + return 0 +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go new file mode 100644 index 0000000..ede5bb8 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unsafe" +) + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + selfValue := reflect.ValueOf(self).Elem() + objValue := reflect.ValueOf(obj).Elem() + + hr, _, _ := syscall.Syscall( + method, + 3, + selfValue.UnsafeAddr(), + uintptr(unsafe.Pointer(interfaceID)), + objValue.Addr().Pointer()) + if hr != 0 { + err = NewError(hr) + } + return +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + hr, _, _ := syscall.Syscall( + unk.VTable().QueryInterface, + 3, + uintptr(unsafe.Pointer(unk)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func addRef(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().AddRef, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} + +func release(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().Release, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go new file mode 100644 index 0000000..e2ae4f4 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ole.go @@ -0,0 +1,157 @@ +package ole + +import ( + "fmt" + "strings" +) + +// DISPPARAMS are the arguments that passed to methods or property. +type DISPPARAMS struct { + rgvarg uintptr + rgdispidNamedArgs uintptr + cArgs uint32 + cNamedArgs uint32 +} + +// EXCEPINFO defines exception info. +type EXCEPINFO struct { + wCode uint16 + wReserved uint16 + bstrSource *uint16 + bstrDescription *uint16 + bstrHelpFile *uint16 + dwHelpContext uint32 + pvReserved uintptr + pfnDeferredFillIn uintptr + scode uint32 +} + +// WCode return wCode in EXCEPINFO. +func (e EXCEPINFO) WCode() uint16 { + return e.wCode +} + +// SCODE return scode in EXCEPINFO. +func (e EXCEPINFO) SCODE() uint32 { + return e.scode +} + +// String convert EXCEPINFO to string. 
+func (e EXCEPINFO) String() string { + var src, desc, hlp string + if e.bstrSource == nil { + src = "" + } else { + src = BstrToString(e.bstrSource) + } + + if e.bstrDescription == nil { + desc = "" + } else { + desc = BstrToString(e.bstrDescription) + } + + if e.bstrHelpFile == nil { + hlp = "" + } else { + hlp = BstrToString(e.bstrHelpFile) + } + + return fmt.Sprintf( + "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x", + e.wCode, src, desc, hlp, e.dwHelpContext, e.scode, + ) +} + +// Error implements error interface and returns error string. +func (e EXCEPINFO) Error() string { + if e.bstrDescription != nil { + return strings.TrimSpace(BstrToString(e.bstrDescription)) + } + + src := "Unknown" + if e.bstrSource != nil { + src = BstrToString(e.bstrSource) + } + + code := e.scode + if e.wCode != 0 { + code = uint32(e.wCode) + } + + return fmt.Sprintf("%v: %#x", src, code) +} + +// PARAMDATA defines parameter data type. +type PARAMDATA struct { + Name *int16 + Vt uint16 +} + +// METHODDATA defines method info. +type METHODDATA struct { + Name *uint16 + Data *PARAMDATA + Dispid int32 + Meth uint32 + CC int32 + CArgs uint32 + Flags uint16 + VtReturn uint32 +} + +// INTERFACEDATA defines interface info. +type INTERFACEDATA struct { + MethodData *METHODDATA + CMembers uint32 +} + +// Point is 2D vector type. +type Point struct { + X int32 + Y int32 +} + +// Msg is message between processes. +type Msg struct { + Hwnd uint32 + Message uint32 + Wparam int32 + Lparam int32 + Time uint32 + Pt Point +} + +// TYPEDESC defines data type. +type TYPEDESC struct { + Hreftype uint32 + VT uint16 +} + +// IDLDESC defines IDL info. +type IDLDESC struct { + DwReserved uint32 + WIDLFlags uint16 +} + +// TYPEATTR defines type info. 
+type TYPEATTR struct { + Guid GUID + Lcid uint32 + dwReserved uint32 + MemidConstructor int32 + MemidDestructor int32 + LpstrSchema *uint16 + CbSizeInstance uint32 + Typekind int32 + CFuncs uint16 + CVars uint16 + CImplTypes uint16 + CbSizeVft uint16 + CbAlignment uint16 + WTypeFlags uint16 + WMajorVerNum uint16 + WMinorVerNum uint16 + TdescAlias TYPEDESC + IdldescType IDLDESC +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go new file mode 100644 index 0000000..60df73c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection.go @@ -0,0 +1,100 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +type stdDispatch struct { + lpVtbl *stdDispatchVtbl + ref int32 + iid *ole.GUID + iface interface{} + funcMap map[string]int32 +} + +type stdDispatchVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr + pGetTypeInfoCount uintptr + pGetTypeInfo uintptr + pGetIDsOfNames uintptr + pInvoke uintptr +} + +func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + *punk = nil + if ole.IsEqualGUID(iid, ole.IID_IUnknown) || + ole.IsEqualGUID(iid, ole.IID_IDispatch) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + if ole.IsEqualGUID(iid, pthis.iid) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + return ole.E_NOINTERFACE +} + +func dispAddRef(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref++ + return pthis.ref +} + +func dispRelease(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref-- + return pthis.ref +} + +func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + names := make([]string, len(wnames)) + for i := 0; i < len(names); i++ { + names[i] = ole.LpOleStrToString(wnames[i]) + } + for n := 0; n < namelen; n++ { + if id, ok := pthis.funcMap[names[n]]; ok { + pdisp[n] = id + } + } + return ole.S_OK +} + +func dispGetTypeInfoCount(pcount *int) uintptr { + if pcount != nil { + *pcount = 0 + } + return ole.S_OK +} + +func dispGetTypeInfo(ptypeif *uintptr) uintptr { + return ole.E_NOTIMPL +} + +func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + found := "" + for name, id := range pthis.funcMap { + if id == dispid { + found = name + } + } + if found != "" { + rv := reflect.ValueOf(pthis.iface).Elem() + rm := rv.MethodByName(found) + rr := rm.Call([]reflect.Value{}) + println(len(rr)) + return ole.S_OK + } + return ole.E_NOTIMPL +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go new file mode 100644 index 0000000..8818fb8 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go @@ -0,0 +1,10 @@ +// +build !windows + +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ConnectObject creates a connection point between two services for communication. 
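+//
+// Rough usage sketch (disp, eventsIID and eventSink are hypothetical
+// placeholders for an IDispatch, its events interface ID and a
+// caller-supplied receiver):
+//
+//	cookie, err := ConnectObject(disp, eventsIID, eventSink)
+//	if err != nil {
+//		// on non-Windows builds this always fails with E_NOTIMPL
+//	}
+//	_ = cookie // retained for later teardown of the event connection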
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) { + return 0, ole.NewError(ole.E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go new file mode 100644 index 0000000..ab9c0d8 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "syscall" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +// ConnectObject creates a connection point between two services for communication. +func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) { + unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer) + if err != nil { + return + } + + container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown)) + var point *ole.IConnectionPoint + err = container.FindConnectionPoint(iid, &point) + if err != nil { + return + } + if edisp, ok := idisp.(*ole.IUnknown); ok { + cookie, err = point.Advise(edisp) + container.Release() + if err != nil { + return + } + } + rv := reflect.ValueOf(disp).Elem() + if rv.Type().Kind() == reflect.Struct { + dest := &stdDispatch{} + dest.lpVtbl = &stdDispatchVtbl{} + dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface) + dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef) + dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease) + dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount) + dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo) + dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames) + dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke) + dest.iface = disp + dest.iid = iid + cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest))) + container.Release() + if err != nil { + point.Release() + return + } + return + } + + container.Release() + + return 0, ole.NewError(ole.E_INVALIDARG) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go new file mode 100644 index 0000000..5834762 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go @@ -0,0 +1,6 @@ +// This file is here so go get succeeds as without it errors with: +// no buildable Go source files in ... +// +// +build !windows + +package oleutil diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go new file mode 100644 index 0000000..f7803c1 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go @@ -0,0 +1,127 @@ +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +func ClassIDFrom(programID string) (classID *ole.GUID, err error) { + return ole.ClassIDFrom(programID) +} + +// CreateObject creates object from programID based on interface type. +// +// Only supports IUnknown. +// +// Program ID can be either program ID or application string. +func CreateObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// GetActiveObject retrieves active object for program ID and interface ID based +// on interface type. +// +// Only supports IUnknown. 
+// +// Program ID can be either program ID or application string. +func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// CallMethod calls method on IDispatch with parameters. +func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params) +} + +// MustCallMethod calls method on IDispatch with parameters or panics. +func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := CallMethod(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// GetProperty retrieves property from IDispatch. +func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params) +} + +// MustGetProperty retrieves property from IDispatch or panics. +func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := GetProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutProperty mutates property. +func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params) +} + +// MustPutProperty mutates property or panics. +func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutPropertyRef mutates property reference. +func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) +} + +// MustPutPropertyRef mutates property reference or panics. +func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutPropertyRef(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { + newEnum, err := disp.GetProperty("_NewEnum") + if err != nil { + return err + } + defer newEnum.Clear() + + enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + defer enum.Release() + + for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) { + if err != nil { + return err + } + if ferr := f(&item); ferr != nil { + return ferr + } + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go new file mode 100644 index 0000000..a5201b5 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray.go @@ -0,0 +1,27 @@ +// Package is meant to retrieve and process safe array data returned from COM. + +package ole + +// SafeArrayBound defines the SafeArray boundaries. +type SafeArrayBound struct { + Elements uint32 + LowerBound int32 +} + +// SafeArray is how COM handles arrays. 
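+//
+// The field layout appears to mirror the Win32 SAFEARRAY descriptor
+// (cDims, fFeatures, cbElements, cLocks, pvData, rgsabound).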
+type SafeArray struct { + Dimensions uint16 + FeaturesFlag uint16 + ElementsSize uint32 + LocksAmount uint32 + Data uint32 + Bounds [16]byte +} + +// SAFEARRAY is obsolete, exists for backwards compatibility. +// Use SafeArray +type SAFEARRAY SafeArray + +// SAFEARRAYBOUND is obsolete, exists for backwards compatibility. +// Use SafeArrayBound +type SAFEARRAYBOUND SafeArrayBound diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go new file mode 100644 index 0000000..0dee670 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_func.go @@ -0,0 +1,211 @@ +// +build !windows + +package ole + +import ( + "unsafe" +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { + return uintptr(0), NewError(E_NOTIMPL) +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. +func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyDescriptor destroys SafeArray object. 
+// +// AKA: SafeArrayDestroyDescriptor in Windows API. +func safeArrayDestroyDescriptor(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves element at given index. +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves element at given index and converts to string. +func safeArrayGetElementString(safearray *SafeArray, index int32) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { + return uint16(0), NewError(E_NOTIMPL) +} + +// safeArrayLock locks SafeArray for reading to modify SafeArray. +// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. 
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go new file mode 100644 index 0000000..b48a239 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_windows.go @@ -0,0 +1,337 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +var ( + procSafeArrayAccessData, _ = modoleaut32.FindProc("SafeArrayAccessData") + procSafeArrayAllocData, _ = modoleaut32.FindProc("SafeArrayAllocData") + procSafeArrayAllocDescriptor, _ = modoleaut32.FindProc("SafeArrayAllocDescriptor") + procSafeArrayAllocDescriptorEx, _ = modoleaut32.FindProc("SafeArrayAllocDescriptorEx") + procSafeArrayCopy, _ = modoleaut32.FindProc("SafeArrayCopy") + procSafeArrayCopyData, _ = modoleaut32.FindProc("SafeArrayCopyData") + procSafeArrayCreate, _ = modoleaut32.FindProc("SafeArrayCreate") + procSafeArrayCreateEx, _ = modoleaut32.FindProc("SafeArrayCreateEx") + procSafeArrayCreateVector, _ = modoleaut32.FindProc("SafeArrayCreateVector") + procSafeArrayCreateVectorEx, _ = modoleaut32.FindProc("SafeArrayCreateVectorEx") + procSafeArrayDestroy, _ = modoleaut32.FindProc("SafeArrayDestroy") + procSafeArrayDestroyData, _ = modoleaut32.FindProc("SafeArrayDestroyData") + procSafeArrayDestroyDescriptor, _ = modoleaut32.FindProc("SafeArrayDestroyDescriptor") + procSafeArrayGetDim, _ = modoleaut32.FindProc("SafeArrayGetDim") + procSafeArrayGetElement, _ = modoleaut32.FindProc("SafeArrayGetElement") + procSafeArrayGetElemsize, _ = modoleaut32.FindProc("SafeArrayGetElemsize") + procSafeArrayGetIID, _ = modoleaut32.FindProc("SafeArrayGetIID") + procSafeArrayGetLBound, _ = modoleaut32.FindProc("SafeArrayGetLBound") + procSafeArrayGetUBound, _ = modoleaut32.FindProc("SafeArrayGetUBound") + procSafeArrayGetVartype, _ = modoleaut32.FindProc("SafeArrayGetVartype") + procSafeArrayLock, _ = modoleaut32.FindProc("SafeArrayLock") + procSafeArrayPtrOfIndex, _ = modoleaut32.FindProc("SafeArrayPtrOfIndex") + procSafeArrayUnaccessData, _ = modoleaut32.FindProc("SafeArrayUnaccessData") + procSafeArrayUnlock, _ = modoleaut32.FindProc("SafeArrayUnlock") + procSafeArrayPutElement, _ = modoleaut32.FindProc("SafeArrayPutElement") + //procSafeArrayRedim, _ = modoleaut32.FindProc("SafeArrayRedim") // TODO + //procSafeArraySetIID, _ = modoleaut32.FindProc("SafeArraySetIID") // TODO + procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo") + procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo") +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +// Todo: Test +func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { + err = convertHresultToError( + procSafeArrayAccessData.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&element)))) + return +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocDescriptor allocates SafeArray. 
+// +// AKA: SafeArrayAllocDescriptor in Windows API. +func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptorEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayCopy.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { + err = convertHresultToError( + procSafeArrayCopyData.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(duplicate)))) + return +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreate.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds))) + safearray = (*SafeArray)(unsafe.Pointer(&sa)) + return +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds)), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVector.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length)) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVectorEx.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyDescriptor destroys SafeArray object. 
+// +// AKA: SafeArrayDestroyDescriptor in Windows API. +func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { + l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) + dimensions = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { + l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) + length = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElement retrieves element at given index. +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { + return convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(pv))) +} + +// safeArrayGetElementString retrieves element at given index and converts to string. +func safeArrayGetElementString(safearray *SafeArray, index int32) (str string, err error) { + var element *int16 + err = convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(&element)))) + str = BstrToString(*(**uint16)(unsafe.Pointer(&element))) + SysFreeString(element) + return +} + +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { + err = convertHresultToError( + procSafeArrayGetIID.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&guid)))) + return +} + +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int32, err error) { + err = convertHresultToError( + procSafeArrayGetLBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&lowerBound)))) + return +} + +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int32, err error) { + err = convertHresultToError( + procSafeArrayGetUBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&upperBound)))) + return +} + +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { + err = convertHresultToError( + procSafeArrayGetVartype.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&varType)))) + return +} + +// safeArrayLock locks SafeArray for reading to modify SafeArray. 
+// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { + err = convertHresultToError( + procSafeArrayPutElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(element)))) + return +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { + err = convertHresultToError( + procSafeArrayGetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { + err = convertHresultToError( + procSafeArraySetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go new file mode 100644 index 0000000..259f488 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayconversion.go @@ -0,0 +1,140 @@ +// Helper for converting SafeArray to array of objects. 
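+//
+// Typical flow (illustrative; variant stands for any *VARIANT returned by a
+// COM call that carries VT_ARRAY data):
+//
+//	sac := variant.ToArray() // nil when the VARIANT does not hold an array
+//	values := sac.ToValueArray()
+//	sac.Release()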
+ +package ole + +import ( + "unsafe" +) + +type SafeArrayConversion struct { + Array *SafeArray +} + +func (sac *SafeArrayConversion) ToStringArray() (strings []string) { + totalElements, _ := sac.TotalElements(0) + strings = make([]string, totalElements) + + for i := int32(0); i < totalElements; i++ { + strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) + } + + return +} + +func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { + totalElements, _ := sac.TotalElements(0) + bytes = make([]byte, totalElements) + + for i := int32(0); i < totalElements; i++ { + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) + } + + return +} + +func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { + totalElements, _ := sac.TotalElements(0) + values = make([]interface{}, totalElements) + vt, _ := safeArrayGetVartype(sac.Array) + + for i := int32(0); i < totalElements; i++ { + switch VT(vt) { + case VT_BOOL: + var v bool + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I1: + var v int8 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I2: + var v int16 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I4: + var v int32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I8: + var v int64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI1: + var v uint8 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI2: + var v uint16 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI4: + var v uint32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI8: + var v uint64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_R4: + var v float32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_R8: + var v float64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_BSTR: + var v string + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_VARIANT: + var v VARIANT + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v.Value() + default: + // TODO + } + } + + return +} + +func (sac *SafeArrayConversion) GetType() (varType uint16, err error) { + return safeArrayGetVartype(sac.Array) +} + +func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) { + return safeArrayGetDim(sac.Array) +} + +func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { + return safeArrayGetElementSize(sac.Array) +} + +func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int32, err error) { + if index < 1 { + index = 1 + } + + // Get array bounds + var LowerBounds int32 + var UpperBounds int32 + + LowerBounds, err = safeArrayGetLBound(sac.Array, index) + if err != nil { + return + } + + UpperBounds, err = safeArrayGetUBound(sac.Array, index) + if err != nil { + return + } + + totalElements = UpperBounds - LowerBounds + 1 + return +} + +// Release Safe Array memory +func (sac *SafeArrayConversion) Release() { + safeArrayDestroy(sac.Array) +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go new file mode 100644 index 0000000..a9fa885 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayslices.go @@ -0,0 +1,33 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +func 
safeArrayFromByteSlice(slice []byte) *SafeArray { + array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []byte to SAFEARRAY") + } + + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v))) + } + return array +} + +func safeArrayFromStringSlice(slice []string) *SafeArray { + array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []string to SAFEARRAY") + } + // SysAllocStringLen(s) + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) + } + return array +} diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go new file mode 100644 index 0000000..99ee82d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/utility.go @@ -0,0 +1,101 @@ +package ole + +import ( + "unicode/utf16" + "unsafe" +) + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +// +// Helper that provides check against both Class ID from Program ID and Class ID from string. It is +// faster, if you know which you are using, to use the individual functions, but this will check +// against available functions for you. +func ClassIDFrom(programID string) (classID *GUID, err error) { + classID, err = CLSIDFromProgID(programID) + if err != nil { + classID, err = CLSIDFromString(programID) + if err != nil { + return + } + } + return +} + +// BytePtrToString converts byte pointer to a Go string. +func BytePtrToString(p *byte) string { + a := (*[10000]uint8)(unsafe.Pointer(p)) + i := 0 + for a[i] != 0 { + i++ + } + return string(a[:i]) +} + +// UTF16PtrToString is alias for LpOleStrToString. +// +// Kept for compatibility reasons. +func UTF16PtrToString(p *uint16) string { + return LpOleStrToString(p) +} + +// LpOleStrToString converts COM Unicode to Go string. +func LpOleStrToString(p *uint16) string { + if p == nil { + return "" + } + + length := lpOleStrLen(p) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + + return string(utf16.Decode(a)) +} + +// BstrToString converts COM binary string to Go string. +func BstrToString(p *uint16) string { + if p == nil { + return "" + } + length := SysStringLen((*int16)(unsafe.Pointer(p))) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return string(utf16.Decode(a)) +} + +// lpOleStrLen returns the length of Unicode string. +func lpOleStrLen(p *uint16) (length int64) { + if p == nil { + return 0 + } + + ptr := unsafe.Pointer(p) + + for i := 0; ; i++ { + if 0 == *(*uint16)(ptr) { + length = int64(i) + break + } + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return +} + +// convertHresultToError converts syscall to error, if call is unsuccessful. 
+func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) { + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go new file mode 100644 index 0000000..ebe00f1 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variables.go @@ -0,0 +1,16 @@ +// +build windows + +package ole + +import ( + "syscall" +) + +var ( + modcombase = syscall.NewLazyDLL("combase.dll") + modkernel32, _ = syscall.LoadDLL("kernel32.dll") + modole32, _ = syscall.LoadDLL("ole32.dll") + modoleaut32, _ = syscall.LoadDLL("oleaut32.dll") + modmsvcrt, _ = syscall.LoadDLL("msvcrt.dll") + moduser32, _ = syscall.LoadDLL("user32.dll") +) diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go new file mode 100644 index 0000000..967a23f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant.go @@ -0,0 +1,105 @@ +package ole + +import "unsafe" + +// NewVariant returns new variant based on type and value. +func NewVariant(vt VT, val int64) VARIANT { + return VARIANT{VT: vt, Val: val} +} + +// ToIUnknown converts Variant to Unknown object. +func (v *VARIANT) ToIUnknown() *IUnknown { + if v.VT != VT_UNKNOWN { + return nil + } + return (*IUnknown)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToIDispatch converts variant to dispatch object. +func (v *VARIANT) ToIDispatch() *IDispatch { + if v.VT != VT_DISPATCH { + return nil + } + return (*IDispatch)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToArray converts variant to SafeArray helper. +func (v *VARIANT) ToArray() *SafeArrayConversion { + if v.VT != VT_SAFEARRAY { + if v.VT&VT_ARRAY == 0 { + return nil + } + } + var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val))) + return &SafeArrayConversion{safeArray} +} + +// ToString converts variant to Go string. +func (v *VARIANT) ToString() string { + if v.VT != VT_BSTR { + return "" + } + return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val))) +} + +// Clear the memory of variant object. +func (v *VARIANT) Clear() error { + return VariantClear(v) +} + +// Value returns variant value based on its type. +// +// Currently supported types: 2- and 4-byte integers, strings, bools. +// Note that 64-bit integers, datetimes, and other types are stored as strings +// and will be returned as strings. +// +// Needs to be further converted, because this returns an interface{}. +func (v *VARIANT) Value() interface{} { + switch v.VT { + case VT_I1: + return int8(v.Val) + case VT_UI1: + return uint8(v.Val) + case VT_I2: + return int16(v.Val) + case VT_UI2: + return uint16(v.Val) + case VT_I4: + return int32(v.Val) + case VT_UI4: + return uint32(v.Val) + case VT_I8: + return int64(v.Val) + case VT_UI8: + return uint64(v.Val) + case VT_INT: + return int(v.Val) + case VT_UINT: + return uint(v.Val) + case VT_INT_PTR: + return uintptr(v.Val) // TODO + case VT_UINT_PTR: + return uintptr(v.Val) + case VT_R4: + return *(*float32)(unsafe.Pointer(&v.Val)) + case VT_R8: + return *(*float64)(unsafe.Pointer(&v.Val)) + case VT_BSTR: + return v.ToString() + case VT_DATE: + // VT_DATE type will either return float64 or time.Time. 
+ d := uint64(v.Val) + date, err := GetVariantDate(d) + if err != nil { + return float64(v.Val) + } + return date + case VT_UNKNOWN: + return v.ToIUnknown() + case VT_DISPATCH: + return v.ToIDispatch() + case VT_BOOL: + return v.Val != 0 + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go new file mode 100644 index 0000000..e73736b --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_386.go @@ -0,0 +1,11 @@ +// +build 386 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go new file mode 100644 index 0000000..dccdde1 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_amd64.go @@ -0,0 +1,12 @@ +// +build amd64 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_386.go b/vendor/github.com/go-ole/go-ole/variant_date_386.go new file mode 100644 index 0000000..1b970f6 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_386.go @@ -0,0 +1,22 @@ +// +build windows,386 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go new file mode 100644 index 0000000..6952f1f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go @@ -0,0 +1,20 @@ +// +build windows,amd64 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. 
+func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go new file mode 100644 index 0000000..326427a --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go new file mode 100644 index 0000000..9874ca6 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go new file mode 100644 index 0000000..729b4a0 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/vt_string.go @@ -0,0 +1,58 @@ +// generated by stringer -output vt_string.go -type VT; DO NOT EDIT + +package ole + +import "fmt" + +const ( + _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL" + _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR" + _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR" + _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID" + _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR" + _VT_name_5 = "VT_ARRAY" + _VT_name_6 = "VT_BYREF" + _VT_name_7 = "VT_RESERVED" + _VT_name_8 = "VT_ILLEGAL" +) + +var ( + _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110} + _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122} + _VT_index_2 = [...]uint8{0, 9, 19, 30} + _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98} + _VT_index_4 = [...]uint8{0, 12, 21} + _VT_index_5 = [...]uint8{0, 8} + _VT_index_6 = [...]uint8{0, 8} + _VT_index_7 = [...]uint8{0, 11} + _VT_index_8 = [...]uint8{0, 10} +) + +func (i VT) String() string { + switch { + case 0 <= i && i <= 14: + return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]] + case 16 <= i && i <= 31: + i -= 16 + return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]] + case 36 <= i && i <= 38: + i -= 36 + return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]] + case 64 <= i && i <= 72: + i -= 64 + return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]] + case 4095 <= i && i <= 4096: + i -= 4095 + return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]] + case i == 8192: + return _VT_name_5 + case i == 16384: + return _VT_name_6 + case i == 32768: + return _VT_name_7 + case i == 65535: + return _VT_name_8 + default: + return fmt.Sprintf("VT(%d)", i) + } +} diff --git a/vendor/github.com/go-ole/go-ole/winrt.go b/vendor/github.com/go-ole/go-ole/winrt.go new file mode 100644 index 0000000..4e9eca7 --- /dev/null 
+++ b/vendor/github.com/go-ole/go-ole/winrt.go @@ -0,0 +1,99 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unicode/utf8" + "unsafe" +) + +var ( + procRoInitialize = modcombase.NewProc("RoInitialize") + procRoActivateInstance = modcombase.NewProc("RoActivateInstance") + procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory") + procWindowsCreateString = modcombase.NewProc("WindowsCreateString") + procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString") + procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer") +) + +func RoInitialize(thread_type uint32) (err error) { + hr, _, _ := procRoInitialize.Call(uintptr(thread_type)) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoActivateInstance.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoGetActivationFactory.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. +func NewHString(s string) (hstring HString, err error) { + u16 := syscall.StringToUTF16Ptr(s) + len := uint32(utf8.RuneCountInString(s)) + hr, _, _ := procWindowsCreateString.Call( + uintptr(unsafe.Pointer(u16)), + uintptr(len), + uintptr(unsafe.Pointer(&hstring))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// String returns Go string value of HString. +func (h HString) String() string { + var u16buf uintptr + var u16len uint32 + u16buf, _, _ = procWindowsGetStringRawBuffer.Call( + uintptr(h), + uintptr(unsafe.Pointer(&u16len))) + + u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)} + u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr)) + return syscall.UTF16ToString(u16) +} diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go new file mode 100644 index 0000000..52e6d74 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt_doc.go @@ -0,0 +1,36 @@ +// +build !windows + +package ole + +// RoInitialize +func RoInitialize(thread_type uint32) (err error) { + return NewError(E_NOTIMPL) +} + +// RoActivateInstance +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// RoGetActivationFactory +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. +func NewHString(s string) (hstring HString, err error) { + return HString(uintptr(0)), NewError(E_NOTIMPL) +} + +// DeleteHString deletes HString. 
+func DeleteHString(hstring HString) (err error) { + return NewError(E_NOTIMPL) +} + +// String returns Go string value of HString. +func (h HString) String() string { + return "" +} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000..0f64693 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 0000000..3cd3249 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,253 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. 
+ // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 0000000..d9aa3c4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,428 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. 
It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. 
+func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 0000000..dea2617 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. 
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 0000000..3abfed2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. 
+func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. 
+func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 0000000..d4db5a1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. 
+ +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 0000000..816a3b9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,543 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. 
+ return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. 
Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. 
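
From the caller's side, the extension helpers documented above are driven with a descriptor emitted by the generator (conventionally named E_...). A rough sketch, where the pb package, its Profile message, and pb.E_Nickname (a *string extension) are hypothetical stand-ins:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "example.com/profilepb" // hypothetical generated package with an extendable Profile message
    )

    func main() {
        msg := &pb.Profile{} // hypothetical proto2 message with a declared extension range

        // SetExtension type-checks the value against E_Nickname.ExtensionType (*string here).
        if err := proto.SetExtension(msg, pb.E_Nickname, proto.String("gopher")); err != nil {
            log.Fatal(err)
        }

        if proto.HasExtension(msg, pb.E_Nickname) {
            // GetExtension hands back an interface{} holding the descriptor's Go type;
            // were the field absent with no declared default, it would report ErrMissingExtension.
            v, err := proto.GetExtension(msg, pb.E_Nickname)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(*v.(*string)) // gopher
        }

        proto.ClearExtension(msg, pb.E_Nickname)
    }
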
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 0000000..75565cc --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,979 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. 
+ +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb 
"./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. 
+const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. 
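
As a usage sketch of the Buffer API and SetDeterministic described above (type and package names here are illustrative): keep one Buffer, reset it between messages to reuse its backing slice, and copy the bytes out since Bytes returns the internal slice.

    // Package wireutil is an illustrative name only.
    package wireutil

    import "github.com/golang/protobuf/proto"

    // Encoder reuses one proto.Buffer across calls, as the Buffer docs suggest,
    // and asks for deterministic output so equal messages marshal to equal bytes.
    type Encoder struct {
        buf *proto.Buffer
    }

    func NewEncoder() *Encoder {
        b := proto.NewBuffer(nil)
        b.SetDeterministic(true) // map entries are emitted in sorted key order
        return &Encoder{buf: b}
    }

    func (e *Encoder) Marshal(msg proto.Message) ([]byte, error) {
        e.buf.Reset() // reuse the backing slice from the previous call
        if err := e.buf.Marshal(msg); err != nil {
            return nil, err
        }
        // Bytes returns the Buffer's internal slice; copy it so the next Reset
        // does not clobber data the caller still holds.
        return append([]byte(nil), e.buf.Bytes()...), nil
    }
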
+func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
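
A quick illustration of the two enum helpers above, with name/value maps shaped like the FOO maps from the package documentation (the maps are written inline here so the snippet stands alone):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    // Name/value maps in the shape the generator emits, mirroring the FOO enum
    // from the package documentation above (X = 17).
    var (
        fooName  = map[int32]string{17: "X"}
        fooValue = map[string]int32{"X": 17}
    )

    func main() {
        fmt.Println(proto.EnumName(fooName, 17)) // X
        fmt.Println(proto.EnumName(fooName, 99)) // 99 (unknown values fall back to the number)

        // UnmarshalJSONEnum accepts both JSON representations: symbolic and numeric.
        bySymbol, _ := proto.UnmarshalJSONEnum(fooValue, []byte(`"X"`), "FOO")
        byNumber, _ := proto.UnmarshalJSONEnum(fooValue, []byte(`17`), "FOO")
        fmt.Println(bySymbol, byNumber) // 17 17
    }
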
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
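
To see SetDefaults in terms of the documentation example earlier in this file: Test.Type declares default=77, so SetDefaults materializes that value into the struct when the field is unset. The pb import path below is a hypothetical stand-in for the generated example package.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "example.com/examplepb" // hypothetical package generated from the test.proto in the package docs
    )

    func main() {
        m := &pb.Test{Label: proto.String("hello")}
        fmt.Println(m.Type == nil) // true: unset; only GetType() knows about the default so far

        // SetDefaults writes declared defaults into the struct itself, so direct
        // field access (not just the getters) now sees them.
        proto.SetDefaults(m)
        fmt.Println(*m.Type) // 77, from `optional int32 type = 2 [default=77]`
    }
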
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 0000000..3b6ca41 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,314 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "sync" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. 
+func MarshalMessageSet(exts interface{}) ([]byte, error) { + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + + case map[int32]Extension: + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, + } + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + + default: + return nil, errors.New("proto: not an extension map") + } +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. 
+ continue + } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000..b6cad90 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. 
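Both pointer implementations expose the same field and pointer accessors; this file identifies a field by its reflect index path, while pointer_unsafe.go (later in this diff) identifies it by its byte offset from the start of the struct. A short sketch, independent of the library, of the two ways of reaching the same struct field that those two representations correspond to:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type sample struct { // hypothetical struct standing in for a generated message
	A int32
	B string
}

func main() {
	s := sample{A: 7, B: "x"}
	f, _ := reflect.TypeOf(s).FieldByName("B")

	// pointer_reflect.go style: a field is the index path passed to FieldByIndex.
	viaReflect := reflect.ValueOf(&s).Elem().FieldByIndex(f.Index).Addr().Interface().(*string)

	// pointer_unsafe.go style: a field is a byte offset added to the struct's address.
	viaUnsafe := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(&s)) + f.Offset))

	fmt.Println(*viaReflect == *viaUnsafe) // true
}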
+ +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. 
+func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. 
+func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000..d55a335 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. 
+ /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. 
+// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 0000000..50b99b8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,544 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
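Parse above decodes the generated protobuf struct tag ("wiretype,tag,options...") into a Properties value. A small sketch, assuming the upstream import path and using the hypothetical tag string from the comment above, of what the exported Parse produces:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	p := new(proto.Properties)
	p.Parse("bytes,49,opt,name=foo,def=hello!") // hypothetical tag in the format shown above

	fmt.Println(p.Wire, p.Tag, p.Optional, p.OrigName) // bytes 49 true foo
	fmt.Println(p.HasDefault, p.Default)               // true hello!
}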
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + switch t1 := typ; t1.Kind() { + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + + case reflect.Slice: + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() + } + + case reflect.Map: + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. 
+ sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. 
+func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 0000000..b167944 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2767 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
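RegisterType, RegisterEnum, and RegisterMapType above are normally called from generated init code to populate the global registries that MessageName, MessageType, and EnumValueMap read back. A minimal sketch with a hypothetical hand-written message type (not generated code), assuming the upstream import path:

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// fakeMsg is a hypothetical stand-in for a generated message type.
type fakeMsg struct{}

func (*fakeMsg) Reset()         {}
func (*fakeMsg) String() string { return "fakeMsg{}" }
func (*fakeMsg) ProtoMessage()  {}

func main() {
	// Generated code performs these registrations in init functions.
	proto.RegisterType((*fakeMsg)(nil), "example.FakeMsg")
	proto.RegisterEnum("example.Color", nil, map[string]int32{"RED": 0, "BLUE": 1})

	fmt.Println(proto.MessageName((*fakeMsg)(nil)))                                      // example.FakeMsg
	fmt.Println(proto.MessageType("example.FakeMsg") == reflect.TypeOf((*fakeMsg)(nil))) // true
	fmt.Println(proto.EnumValueMap("example.Color")["BLUE"])                             // 1
}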
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. 
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. 
+ if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
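setTag and getExtElemInfo above both derive a field's wire key as tag<<3 | wiretype and its encoded key size as SizeVarint(tag<<3). A quick standalone check of that arithmetic with hypothetical field numbers, assuming the upstream import path:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Field 3, length-delimited: key 3<<3|2 = 26, which encodes as a 1-byte varint.
	fmt.Println(uint64(3)<<3 | proto.WireBytes, proto.SizeVarint(uint64(3)<<3)) // 26 1

	// Field 300, varint-encoded: key 300<<3|0 = 2400, which needs a 2-byte varint.
	fmt.Println(uint64(300)<<3 | proto.WireVarint, proto.SizeVarint(uint64(300)<<3)) // 2400 2
}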
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return 
sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. 
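As a point of reference for the tagsize and wiretag arguments that these sizer/marshaler functions take: setTag above derives both from the field number, storing wiretag = tag<<3 | wiretype and tagsize = SizeVarint(tag<<3). The following is a minimal standalone sketch of that arithmetic, not part of the vendored package; varintLen and the field numbers used are illustrative only.

// Illustrative only: recomputes the wiretag/tagsize values that setTag stores.
package main

import "fmt"

// varintLen mirrors SizeVarint: the number of bytes needed to varint-encode x.
func varintLen(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	const wireVarint = 0 // wire type 0 is the protobuf varint wire type

	// Field number 1: wiretag 1<<3|0 = 0x08, which fits in one varint byte.
	fmt.Println(uint64(1)<<3|wireVarint, varintLen(uint64(1)<<3)) // 8 1

	// Field number 16: wiretag 16<<3 = 128 = 0x80, which needs two varint bytes,
	// so every occurrence of that field pays a two-byte tag on the wire.
	fmt.Println(uint64(16)<<3|wireVarint, varintLen(uint64(16)<<3)) // 128 2
}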
+ +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func 
sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} 
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = 
appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. 
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. +func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). 
+ // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. 
+ // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 0000000..5525def --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) 
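+ // append of a non-nil but empty src onto a nil dst yields a nil slice;
+ // the check below restores a non-nil empty slice so a field that was
+ // explicitly set (but empty) in src stays set in dst after the merge.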
+ if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000..ebf1caa --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2051 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
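+ // u.unmarshal walks the wire-format bytes tag by tag, dispatching each
+ // field to the per-field unmarshaler chosen in computeUnmarshalInfo.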
+ err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + panic("no extensions field available") + } + } + + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. 
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. 
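+ // tptr (e.g. *Msg_X) is a generated oneof wrapper holding a single field;
+ // it implements one isMsg_F-style oneof interface, which is what the loop
+ // below matches against to learn where the value should be stored.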
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+ }
+ }
+
+ // Get extension ranges, if any.
+ fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ proto3 := false
+ validateUTF8 := true
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ if tag == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
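The sint32/sint64 decoders that follow undo protobuf's zigzag transform with the expression int64(x>>1) ^ int64(x)<<63>>63 (and its 32-bit twin). A minimal standalone sketch of the round trip, separate from the vendored file, for readers unfamiliar with the trick:

package main

import "fmt"

// zigzag64 maps signed values onto unsigned ones so that numbers close to
// zero stay small on the wire: 0, -1, 1, -2, ... become 0, 1, 2, 3, ...
func zigzag64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// unzigzag64 inverts the mapping; it is the same bit trick used by the
// unmarshalSint64* functions below.
func unzigzag64(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 63, -64} {
		x := zigzag64(v)
		fmt.Printf("%d -> wire %d -> %d\n", v, x, unzigzag64(x))
	}
}

The zigzagged value is then written as an ordinary varint, which is why the sint unmarshalers below first call decodeVarint before applying the inverse transform.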
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 
0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
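+// The wire argument follows the protobuf wire-type numbering used throughout
+// this file: 0 varint, 1 fixed64, 2 length-delimited bytes, 3 start group,
+// 4 end group, 5 fixed32.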
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
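+// Each byte holds seven low-order bits of the value, least-significant group
+// first, with the high bit set on every byte except the last; for example,
+// 300 is encoded as the two bytes 0xAC 0x02.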
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 0000000..1aaee72 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,843 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
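+//
+// A minimal usage sketch, where Foo stands in for any generated message type:
+//
+//	f := &Foo{}                         // hypothetical generated message
+//	_ = proto.MarshalText(os.Stdout, f) // multi-line text form
+//	s := proto.CompactTextString(f)     // single-line text form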
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
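+//
+// When expansion succeeds the field is rendered roughly as
+//
+//	[type.googleapis.com/example.Foo]: <
+//	  some_field: "..."
+//	>
+//
+// where example.Foo is whatever message type the type URL names.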
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
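+ // In the multi-line form each entry is emitted roughly as
+ //   field_name: <
+ //     key: K
+ //     value: V
+ //   >
+ // and entries are written in sorted key order.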
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. 
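+ // NaN and the infinities are written with the text-format spellings
+ // "nan", "inf" and "-inf" prepared above, rather than the "NaN"/"+Inf"
+ // forms that fmt would print by default.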
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
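+ // Bytes outside the printable ASCII range come out as three-digit
+ // octal escapes, e.g. 0x01 becomes \001 and 0xFF becomes \377.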
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 0000000..bb55a3a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,880 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + 
switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. 
+ switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 0000000..70276e8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,141 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. 
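+//
+// A typical call site checks the packed type and then unmarshals into a
+// concrete message (a is the *any.Any value; pb.Foo is a placeholder for a
+// generated type):
+//
+//	if ptypes.Is(a, &pb.Foo{}) {
+//		foo := &pb.Foo{}
+//		if err := ptypes.UnmarshalAny(a, foo); err != nil { ... }
+//	}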
+func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 0000000..e3c56d3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +package any // import "github.com/golang/protobuf/ptypes/any" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } + +var fileDescriptor_any_744b9ca530f228db = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 0000000..c748667 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,149 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 0000000..c0d595d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 0000000..65cb0f8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
+func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 0000000..a7beb2c --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package duration // import "github.com/golang/protobuf/ptypes/duration" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} + +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 0000000..975fce4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
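The conversion helpers in ptypes/duration.go above (Duration and DurationProto) map between the generated durpb.Duration and Go's time.Duration, with validateDuration enforcing the range and sign rules described in its comment. A short sketch of how they might be used; the values are illustrative:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> durpb.Duration: 90.5s splits into seconds and nanos.
	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	// durpb.Duration -> time.Duration.
	d, err := ptypes.Duration(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1m30.5s

	// Seconds and Nanos with different signs fail validation.
	if _, err := ptypes.Duration(&durpb.Duration{Seconds: 1, Nanos: -1}); err != nil {
		fmt.Println("invalid:", err)
	}
}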
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 0000000..47f10db --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,134 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. 
+// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 0000000..8e76ae9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,175 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
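The ptypes/timestamp.go helpers above convert between the generated Timestamp and time.Time, accepting only timestamps in [0001-01-01, 10000-01-01) with Nanos in [0, 1e9). A brief usage sketch; the chosen date is arbitrary:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> Timestamp proto; TimestampProto validates the result.
	t := time.Date(2019, time.September, 30, 10, 26, 20, 0, time.UTC)
	ts, err := ptypes.TimestampProto(t)
	if err != nil {
		log.Fatal(err)
	}

	// Timestamp proto -> time.Time, always returned in UTC.
	back, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.Equal(t)) // true

	// RFC 3339 rendering for valid timestamps.
	fmt.Println(ptypes.TimestampString(ts)) // 2019-09-30T10:26:20Z
}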
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. 
In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} + +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 
0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 0000000..06750ab --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,133 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. 
+// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 0000000..0f0fa83 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/protobuf/wrappers.proto + +package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (dst *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(dst, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (dst *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(dst, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. 
+// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (dst *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(dst, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (dst *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(dst, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (dst *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(dst, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (dst *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(dst, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (dst *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(dst, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (dst *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(dst, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { + proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) +} + +var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 
0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto new file mode 100644 index 0000000..0194763 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. 
+message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/gorhill/cronexpr/APLv2 b/vendor/github.com/gorhill/cronexpr/APLv2 new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/APLv2 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/gorhill/cronexpr/GPLv3 b/vendor/github.com/gorhill/cronexpr/GPLv3 new file mode 100644 index 0000000..c13fcfa --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/GPLv3 @@ -0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. {http://fsf.org/} + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see {http://www.gnu.org/licenses/}. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + cronexpr Copyright (C) 2013 Raymond Hill + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +{http://www.gnu.org/licenses/}. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +{http://www.gnu.org/philosophy/why-not-lgpl.html}. diff --git a/vendor/github.com/gorhill/cronexpr/README.md b/vendor/github.com/gorhill/cronexpr/README.md new file mode 100644 index 0000000..e8c56d2 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/README.md @@ -0,0 +1,134 @@ +Golang Cron expression parser +============================= +Given a cron expression and a time stamp, you can get the next time stamp which satisfies the cron expression. + +In another project, I decided to use cron expression syntax to encode scheduling information. Thus this standalone library to parse and apply time stamps to cron expressions. + +The time-matching algorithm in this implementation is efficient, it avoids as much as possible to guess the next matching time stamp, a common technique seen in a number of implementations out there. 
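
To make the claim above concrete, the following is a minimal, self-contained sketch that computes the next matching time stamp for a cron expression. It relies only on the `Parse`, `Next`, and `IsZero` calls documented later in this README and in cronexpr.go; the `30 6 * * *` expression (every day at 06:30, five-field form) is an illustrative choice, not one taken from the library's own examples.

    package main

    import (
        "fmt"
        "time"

        "github.com/gorhill/cronexpr"
    )

    func main() {
        // Five-field expression: minute hour day-of-month month day-of-week.
        // Here: every day at 06:30.
        expr, err := cronexpr.Parse("30 6 * * *")
        if err != nil {
            fmt.Println("invalid cron expression:", err)
            return
        }

        next := expr.Next(time.Now())
        if next.IsZero() {
            // A zero time means no future instant matches the expression.
            fmt.Println("no matching time stamp")
            return
        }
        fmt.Println("next matching time stamp:", next)
    }
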
+ +There is also a companion command-line utility to evaluate cron time expressions: (which of course uses this library). + +Implementation +-------------- +The reference documentation for this implementation is found at +, which I copy/pasted here (laziness!) with modifications where this implementation differs: + + Field name Mandatory? Allowed values Allowed special characters + ---------- ---------- -------------- -------------------------- + Seconds No 0-59 * / , - + Minutes Yes 0-59 * / , - + Hours Yes 0-23 * / , - + Day of month Yes 1-31 * / , - L W + Month Yes 1-12 or JAN-DEC * / , - + Day of week Yes 0-6 or SUN-SAT * / , - L # + Year No 1970–2099 * / , - + +#### Asterisk ( * ) +The asterisk indicates that the cron expression matches for all values of the field. E.g., using an asterisk in the 4th field (month) indicates every month. + +#### Slash ( / ) +Slashes describe increments of ranges. For example `3-59/15` in the minute field indicate the third minute of the hour and every 15 minutes thereafter. The form `*/...` is equivalent to the form "first-last/...", that is, an increment over the largest possible range of the field. + +#### Comma ( , ) +Commas are used to separate items of a list. For example, using `MON,WED,FRI` in the 5th field (day of week) means Mondays, Wednesdays and Fridays. + +#### Hyphen ( - ) +Hyphens define ranges. For example, 2000-2010 indicates every year between 2000 and 2010 AD, inclusive. + +#### L +`L` stands for "last". When used in the day-of-week field, it allows you to specify constructs such as "the last Friday" (`5L`) of a given month. In the day-of-month field, it specifies the last day of the month. + +#### W +The `W` character is allowed for the day-of-month field. This character is used to specify the business day (Monday-Friday) nearest the given day. As an example, if you were to specify `15W` as the value for the day-of-month field, the meaning is: "the nearest business day to the 15th of the month." + +So, if the 15th is a Saturday, the trigger fires on Friday the 14th. If the 15th is a Sunday, the trigger fires on Monday the 16th. If the 15th is a Tuesday, then it fires on Tuesday the 15th. However if you specify `1W` as the value for day-of-month, and the 1st is a Saturday, the trigger fires on Monday the 3rd, as it does not 'jump' over the boundary of a month's days. + +The `W` character can be specified only when the day-of-month is a single day, not a range or list of days. + +The `W` character can also be combined with `L`, i.e. `LW` to mean "the last business day of the month." + +#### Hash ( # ) +`#` is allowed for the day-of-week field, and must be followed by a number between one and five. It allows you to specify constructs such as "the second Friday" of a given month. 
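
A short, hedged sketch of how the special characters above can be combined: `LW` in the day-of-month field (the last business day of the month) and `5#2` in the day-of-week field (the second Friday). It uses only the `MustParse` and `Next` calls shown in the Usage section further down; the concrete expressions are illustrative choices, not examples prescribed by the library.

    package main

    import (
        "fmt"
        "time"

        "github.com/gorhill/cronexpr"
    )

    func main() {
        now := time.Now()

        // "LW" in the day-of-month field: midnight on the last business day of each month.
        lastBusinessDay := cronexpr.MustParse("0 0 LW * *").Next(now)

        // "5#2" in the day-of-week field: noon on the second Friday of each month.
        secondFriday := cronexpr.MustParse("0 12 * * 5#2").Next(now)

        fmt.Println("next last business day:", lastBusinessDay)
        fmt.Println("next second Friday:    ", secondFriday)
    }
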
+ +Predefined cron expressions +--------------------------- +(Copied from , with text modified according to this implementation) + + Entry Description Equivalent to + @annually Run once a year at midnight in the morning of January 1 0 0 0 1 1 * * + @yearly Run once a year at midnight in the morning of January 1 0 0 0 1 1 * * + @monthly Run once a month at midnight in the morning of the first of the month 0 0 0 1 * * * + @weekly Run once a week at midnight in the morning of Sunday 0 0 0 * * 0 * + @daily Run once a day at midnight 0 0 0 * * * * + @hourly Run once an hour at the beginning of the hour 0 0 * * * * * + @reboot Not supported + +Other details +------------- +* If only six fields are present, a `0` second field is prepended, that is, `* * * * * 2013` internally become `0 * * * * * 2013`. +* If only five fields are present, a `0` second field is prepended and a wildcard year field is appended, that is, `* * * * Mon` internally become `0 * * * * Mon *`. +* Domain for day-of-week field is [0-7] instead of [0-6], 7 being Sunday (like 0). This to comply with http://linux.die.net/man/5/crontab#. +* As of now, the behavior of the code is undetermined if a malformed cron expression is supplied + +Install +------- + go get github.com/gorhill/cronexpr + +Usage +----- +Import the library: + + import "github.com/gorhill/cronexpr" + import "time" + +Simplest way: + + nextTime := cronexpr.MustParse("0 0 29 2 *").Next(time.Now()) + +Assuming `time.Now()` is "2013-08-29 09:28:00", then `nextTime` will be "2016-02-29 00:00:00". + +You can keep the returned Expression pointer around if you want to reuse it: + + expr := cronexpr.MustParse("0 0 29 2 *") + nextTime := expr.Next(time.Now()) + ... + nextTime = expr.Next(nextTime) + +Use `time.IsZero()` to find out whether a valid time was returned. For example, + + cronexpr.MustParse("* * * * * 1980").Next(time.Now()).IsZero() + +will return `true`, whereas + + cronexpr.MustParse("* * * * * 2050").Next(time.Now()).IsZero() + +will return `false` (as of 2013-08-29...) + +You may also query for `n` next time stamps: + + cronexpr.MustParse("0 0 29 2 *").NextN(time.Now(), 5) + +which returns a slice of time.Time objects, containing the following time stamps (as of 2013-08-30): + + 2016-02-29 00:00:00 + 2020-02-29 00:00:00 + 2024-02-29 00:00:00 + 2028-02-29 00:00:00 + 2032-02-29 00:00:00 + +The time zone of time values returned by `Next` and `NextN` is always the +time zone of the time value passed as argument, unless a zero time value is +returned. + +API +--- + + +License +------- + +License: pick the one which suits you best: + +- GPL v3 see +- APL v2 see + diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr.go b/vendor/github.com/gorhill/cronexpr/cronexpr.go new file mode 100644 index 0000000..58b518f --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/cronexpr.go @@ -0,0 +1,266 @@ +/*! + * Copyright 2013 Raymond Hill + * + * Project: github.com/gorhill/cronexpr + * File: cronexpr.go + * Version: 1.0 + * License: pick the one which suits you : + * GPL v3 see + * APL v2 see + * + */ + +// Package cronexpr parses cron time expressions. 
+package cronexpr + +/******************************************************************************/ + +import ( + "fmt" + "sort" + "time" +) + +/******************************************************************************/ + +// A Expression represents a specific cron time expression as defined at +// +type Expression struct { + expression string + secondList []int + minuteList []int + hourList []int + daysOfMonth map[int]bool + workdaysOfMonth map[int]bool + lastDayOfMonth bool + lastWorkdayOfMonth bool + daysOfMonthRestricted bool + actualDaysOfMonthList []int + monthList []int + daysOfWeek map[int]bool + specificWeekDaysOfWeek map[int]bool + lastWeekDaysOfWeek map[int]bool + daysOfWeekRestricted bool + yearList []int +} + +/******************************************************************************/ + +// MustParse returns a new Expression pointer. It expects a well-formed cron +// expression. If a malformed cron expression is supplied, it will `panic`. +// See for documentation +// about what is a well-formed cron expression from this library's point of +// view. +func MustParse(cronLine string) *Expression { + expr, err := Parse(cronLine) + if err != nil { + panic(err) + } + return expr +} + +/******************************************************************************/ + +// Parse returns a new Expression pointer. An error is returned if a malformed +// cron expression is supplied. +// See for documentation +// about what is a well-formed cron expression from this library's point of +// view. +func Parse(cronLine string) (*Expression, error) { + + // Maybe one of the built-in aliases is being used + cron := cronNormalizer.Replace(cronLine) + + indices := fieldFinder.FindAllStringIndex(cron, -1) + fieldCount := len(indices) + if fieldCount < 5 { + return nil, fmt.Errorf("missing field(s)") + } + // ignore fields beyond 7th + if fieldCount > 7 { + fieldCount = 7 + } + + var expr = Expression{} + var field = 0 + var err error + + // second field (optional) + if fieldCount == 7 { + err = expr.secondFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + } else { + expr.secondList = []int{0} + } + + // minute field + err = expr.minuteFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // hour field + err = expr.hourFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // day of month field + err = expr.domFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // month field + err = expr.monthFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // day of week field + err = expr.dowFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // year field + if field < fieldCount { + err = expr.yearFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + } else { + expr.yearList = yearDescriptor.defaultList + } + + return &expr, nil +} + +/******************************************************************************/ + +// Next returns the closest time instant immediately following `fromTime` which +// matches the cron expression `expr`. +// +// The `time.Location` of the returned time instant is the same as that of +// `fromTime`. 
+// +// The zero value of time.Time is returned if no matching time instant exists +// or if a `fromTime` is itself a zero value. +func (expr *Expression) Next(fromTime time.Time) time.Time { + // Special case + if fromTime.IsZero() { + return fromTime + } + + // Since expr.nextSecond()-expr.nextMonth() expects that the + // supplied time stamp is a perfect match to the underlying cron + // expression, and since this function is an entry point where `fromTime` + // does not necessarily matches the underlying cron expression, + // we first need to ensure supplied time stamp matches + // the cron expression. If not, this means the supplied time + // stamp falls in between matching time stamps, thus we move + // to closest future matching immediately upon encountering a mismatching + // time stamp. + + // year + v := fromTime.Year() + i := sort.SearchInts(expr.yearList, v) + if i == len(expr.yearList) { + return time.Time{} + } + if v != expr.yearList[i] { + return expr.nextYear(fromTime) + } + // month + v = int(fromTime.Month()) + i = sort.SearchInts(expr.monthList, v) + if i == len(expr.monthList) { + return expr.nextYear(fromTime) + } + if v != expr.monthList[i] { + return expr.nextMonth(fromTime) + } + + expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(fromTime.Year(), int(fromTime.Month())) + if len(expr.actualDaysOfMonthList) == 0 { + return expr.nextMonth(fromTime) + } + + // day of month + v = fromTime.Day() + i = sort.SearchInts(expr.actualDaysOfMonthList, v) + if i == len(expr.actualDaysOfMonthList) { + return expr.nextMonth(fromTime) + } + if v != expr.actualDaysOfMonthList[i] { + return expr.nextDayOfMonth(fromTime) + } + // hour + v = fromTime.Hour() + i = sort.SearchInts(expr.hourList, v) + if i == len(expr.hourList) { + return expr.nextDayOfMonth(fromTime) + } + if v != expr.hourList[i] { + return expr.nextHour(fromTime) + } + // minute + v = fromTime.Minute() + i = sort.SearchInts(expr.minuteList, v) + if i == len(expr.minuteList) { + return expr.nextHour(fromTime) + } + if v != expr.minuteList[i] { + return expr.nextMinute(fromTime) + } + // second + v = fromTime.Second() + i = sort.SearchInts(expr.secondList, v) + if i == len(expr.secondList) { + return expr.nextMinute(fromTime) + } + + // If we reach this point, there is nothing better to do + // than to move to the next second + + return expr.nextSecond(fromTime) +} + +/******************************************************************************/ + +// NextN returns a slice of `n` closest time instants immediately following +// `fromTime` which match the cron expression `expr`. +// +// The time instants in the returned slice are in chronological ascending order. +// The `time.Location` of the returned time instants is the same as that of +// `fromTime`. +// +// A slice with len between [0-`n`] is returned, that is, if not enough existing +// matching time instants exist, the number of returned entries will be less +// than `n`. 
+func (expr *Expression) NextN(fromTime time.Time, n uint) []time.Time { + nextTimes := make([]time.Time, 0, n) + if n > 0 { + fromTime = expr.Next(fromTime) + for { + if fromTime.IsZero() { + break + } + nextTimes = append(nextTimes, fromTime) + n -= 1 + if n == 0 { + break + } + fromTime = expr.nextSecond(fromTime) + } + } + return nextTimes +} diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr_next.go b/vendor/github.com/gorhill/cronexpr/cronexpr_next.go new file mode 100644 index 0000000..a0ebdb6 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/cronexpr_next.go @@ -0,0 +1,292 @@ +/*! + * Copyright 2013 Raymond Hill + * + * Project: github.com/gorhill/cronexpr + * File: cronexpr_next.go + * Version: 1.0 + * License: pick the one which suits you : + * GPL v3 see + * APL v2 see + * + */ + +package cronexpr + +/******************************************************************************/ + +import ( + "sort" + "time" +) + +/******************************************************************************/ + +var dowNormalizedOffsets = [][]int{ + {1, 8, 15, 22, 29}, + {2, 9, 16, 23, 30}, + {3, 10, 17, 24, 31}, + {4, 11, 18, 25}, + {5, 12, 19, 26}, + {6, 13, 20, 27}, + {7, 14, 21, 28}, +} + +/******************************************************************************/ + +func (expr *Expression) nextYear(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate year + i := sort.SearchInts(expr.yearList, t.Year()+1) + if i == len(expr.yearList) { + return time.Time{} + } + // Year changed, need to recalculate actual days of month + expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(expr.yearList[i], expr.monthList[0]) + if len(expr.actualDaysOfMonthList) == 0 { + return expr.nextMonth(time.Date( + expr.yearList[i], + time.Month(expr.monthList[0]), + 1, + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location())) + } + return time.Date( + expr.yearList[i], + time.Month(expr.monthList[0]), + expr.actualDaysOfMonthList[0], + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextMonth(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate month + i := sort.SearchInts(expr.monthList, int(t.Month())+1) + if i == len(expr.monthList) { + return expr.nextYear(t) + } + // Month changed, need to recalculate actual days of month + expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(t.Year(), expr.monthList[i]) + if len(expr.actualDaysOfMonthList) == 0 { + return expr.nextMonth(time.Date( + t.Year(), + time.Month(expr.monthList[i]), + 1, + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location())) + } + + return time.Date( + t.Year(), + time.Month(expr.monthList[i]), + expr.actualDaysOfMonthList[0], + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextDayOfMonth(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate day of month + i := sort.SearchInts(expr.actualDaysOfMonthList, t.Day()+1) + if i == len(expr.actualDaysOfMonthList) { + return expr.nextMonth(t) + } + + return time.Date( + t.Year(), + t.Month(), + expr.actualDaysOfMonthList[i], + expr.hourList[0], + expr.minuteList[0], + 
expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextHour(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate hour + i := sort.SearchInts(expr.hourList, t.Hour()+1) + if i == len(expr.hourList) { + return expr.nextDayOfMonth(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + expr.hourList[i], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextMinute(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate minute + i := sort.SearchInts(expr.minuteList, t.Minute()+1) + if i == len(expr.minuteList) { + return expr.nextHour(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + expr.minuteList[i], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextSecond(t time.Time) time.Time { + // nextSecond() assumes all other fields are exactly matched + // to the cron expression + + // Find index at which item in list is greater or equal to + // candidate second + i := sort.SearchInts(expr.secondList, t.Second()+1) + if i == len(expr.secondList) { + return expr.nextMinute(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + expr.secondList[i], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) calculateActualDaysOfMonth(year, month int) []int { + actualDaysOfMonthMap := make(map[int]bool) + firstDayOfMonth := time.Date(year, time.Month(month), 1, 0, 0, 0, 0, time.UTC) + lastDayOfMonth := firstDayOfMonth.AddDate(0, 1, -1) + + // As per crontab man page (http://linux.die.net/man/5/crontab#): + // "The day of a command's execution can be specified by two + // "fields - day of month, and day of week. If both fields are + // "restricted (ie, aren't *), the command will be run when + // "either field matches the current time" + + // If both fields are not restricted, all days of the month are a hit + if expr.daysOfMonthRestricted == false && expr.daysOfWeekRestricted == false { + return genericDefaultList[1 : lastDayOfMonth.Day()+1] + } + + // day-of-month != `*` + if expr.daysOfMonthRestricted { + // Last day of month + if expr.lastDayOfMonth { + actualDaysOfMonthMap[lastDayOfMonth.Day()] = true + } + // Last work day of month + if expr.lastWorkdayOfMonth { + actualDaysOfMonthMap[workdayOfMonth(lastDayOfMonth, lastDayOfMonth)] = true + } + // Days of month + for v := range expr.daysOfMonth { + // Ignore days beyond end of month + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + // Work days of month + // As per Wikipedia: month boundaries are not crossed. 
+ for v := range expr.workdaysOfMonth { + // Ignore days beyond end of month + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[workdayOfMonth(firstDayOfMonth.AddDate(0, 0, v-1), lastDayOfMonth)] = true + } + } + } + + // day-of-week != `*` + if expr.daysOfWeekRestricted { + // How far first sunday is from first day of month + offset := 7 - int(firstDayOfMonth.Weekday()) + // days of week + // offset : (7 - day_of_week_of_1st_day_of_month) + // target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7 + for v := range expr.daysOfWeek { + w := dowNormalizedOffsets[(offset+v)%7] + actualDaysOfMonthMap[w[0]] = true + actualDaysOfMonthMap[w[1]] = true + actualDaysOfMonthMap[w[2]] = true + actualDaysOfMonthMap[w[3]] = true + if len(w) > 4 && w[4] <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[w[4]] = true + } + } + // days of week of specific week in the month + // offset : (7 - day_of_week_of_1st_day_of_month) + // target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7 + for v := range expr.specificWeekDaysOfWeek { + v = 1 + 7*(v/7) + (offset+v)%7 + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + // Last days of week of the month + lastWeekOrigin := firstDayOfMonth.AddDate(0, 1, -7) + offset = 7 - int(lastWeekOrigin.Weekday()) + for v := range expr.lastWeekDaysOfWeek { + v = lastWeekOrigin.Day() + (offset+v)%7 + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + } + + return toList(actualDaysOfMonthMap) +} + +func workdayOfMonth(targetDom, lastDom time.Time) int { + // If saturday, then friday + // If sunday, then monday + dom := targetDom.Day() + dow := targetDom.Weekday() + if dow == time.Saturday { + if dom > 1 { + dom -= 1 + } else { + dom += 2 + } + } else if dow == time.Sunday { + if dom < lastDom.Day() { + dom += 1 + } else { + dom -= 2 + } + } + return dom +} diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go b/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go new file mode 100644 index 0000000..289ad53 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go @@ -0,0 +1,488 @@ +/*! 
+ * Copyright 2013 Raymond Hill + * + * Project: github.com/gorhill/cronexpr + * File: cronexpr_parse.go + * Version: 1.0 + * License: pick the one which suits you best: + * GPL v3 see + * APL v2 see + * + */ + +package cronexpr + +/******************************************************************************/ + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +/******************************************************************************/ + +var ( + genericDefaultList = []int{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + } + yearDefaultList = []int{ + 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, + 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, + 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, + 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, + 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, + 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, + 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, + 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, + 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, + 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, + 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, + 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, + } +) + +/******************************************************************************/ + +var ( + numberTokens = map[string]int{ + "0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, + "10": 10, "11": 11, "12": 12, "13": 13, "14": 14, "15": 15, "16": 16, "17": 17, "18": 18, "19": 19, + "20": 20, "21": 21, "22": 22, "23": 23, "24": 24, "25": 25, "26": 26, "27": 27, "28": 28, "29": 29, + "30": 30, "31": 31, "32": 32, "33": 33, "34": 34, "35": 35, "36": 36, "37": 37, "38": 38, "39": 39, + "40": 40, "41": 41, "42": 42, "43": 43, "44": 44, "45": 45, "46": 46, "47": 47, "48": 48, "49": 49, + "50": 50, "51": 51, "52": 52, "53": 53, "54": 54, "55": 55, "56": 56, "57": 57, "58": 58, "59": 59, + "1970": 1970, "1971": 1971, "1972": 1972, "1973": 1973, "1974": 1974, "1975": 1975, "1976": 1976, "1977": 1977, "1978": 1978, "1979": 1979, + "1980": 1980, "1981": 1981, "1982": 1982, "1983": 1983, "1984": 1984, "1985": 1985, "1986": 1986, "1987": 1987, "1988": 1988, "1989": 1989, + "1990": 1990, "1991": 1991, "1992": 1992, "1993": 1993, "1994": 1994, "1995": 1995, "1996": 1996, "1997": 1997, "1998": 1998, "1999": 1999, + "2000": 2000, "2001": 2001, "2002": 2002, "2003": 2003, "2004": 2004, "2005": 2005, "2006": 2006, "2007": 2007, "2008": 2008, "2009": 2009, + "2010": 2010, "2011": 2011, "2012": 2012, "2013": 2013, "2014": 2014, "2015": 2015, "2016": 2016, "2017": 2017, "2018": 2018, "2019": 2019, + "2020": 2020, "2021": 2021, "2022": 2022, "2023": 2023, "2024": 2024, "2025": 2025, "2026": 2026, "2027": 2027, "2028": 2028, "2029": 2029, + "2030": 2030, "2031": 2031, "2032": 2032, "2033": 2033, "2034": 2034, "2035": 2035, "2036": 2036, "2037": 2037, "2038": 2038, "2039": 2039, + "2040": 2040, "2041": 2041, "2042": 2042, "2043": 2043, "2044": 2044, "2045": 2045, "2046": 2046, "2047": 2047, "2048": 2048, "2049": 2049, + "2050": 2050, "2051": 2051, "2052": 2052, "2053": 2053, "2054": 2054, "2055": 2055, "2056": 2056, "2057": 2057, "2058": 
2058, "2059": 2059, + "2060": 2060, "2061": 2061, "2062": 2062, "2063": 2063, "2064": 2064, "2065": 2065, "2066": 2066, "2067": 2067, "2068": 2068, "2069": 2069, + "2070": 2070, "2071": 2071, "2072": 2072, "2073": 2073, "2074": 2074, "2075": 2075, "2076": 2076, "2077": 2077, "2078": 2078, "2079": 2079, + "2080": 2080, "2081": 2081, "2082": 2082, "2083": 2083, "2084": 2084, "2085": 2085, "2086": 2086, "2087": 2087, "2088": 2088, "2089": 2089, + "2090": 2090, "2091": 2091, "2092": 2092, "2093": 2093, "2094": 2094, "2095": 2095, "2096": 2096, "2097": 2097, "2098": 2098, "2099": 2099, + } + monthTokens = map[string]int{ + `1`: 1, `jan`: 1, `january`: 1, + `2`: 2, `feb`: 2, `february`: 2, + `3`: 3, `mar`: 3, `march`: 3, + `4`: 4, `apr`: 4, `april`: 4, + `5`: 5, `may`: 5, + `6`: 6, `jun`: 6, `june`: 6, + `7`: 7, `jul`: 7, `july`: 7, + `8`: 8, `aug`: 8, `august`: 8, + `9`: 9, `sep`: 9, `september`: 9, + `10`: 10, `oct`: 10, `october`: 10, + `11`: 11, `nov`: 11, `november`: 11, + `12`: 12, `dec`: 12, `december`: 12, + } + dowTokens = map[string]int{ + `0`: 0, `sun`: 0, `sunday`: 0, + `1`: 1, `mon`: 1, `monday`: 1, + `2`: 2, `tue`: 2, `tuesday`: 2, + `3`: 3, `wed`: 3, `wednesday`: 3, + `4`: 4, `thu`: 4, `thursday`: 4, + `5`: 5, `fri`: 5, `friday`: 5, + `6`: 6, `sat`: 6, `saturday`: 6, + `7`: 0, + } +) + +/******************************************************************************/ + +func atoi(s string) int { + return numberTokens[s] +} + +type fieldDescriptor struct { + name string + min, max int + defaultList []int + valuePattern string + atoi func(string) int +} + +var ( + secondDescriptor = fieldDescriptor{ + name: "second", + min: 0, + max: 59, + defaultList: genericDefaultList[0:60], + valuePattern: `[0-9]|[1-5][0-9]`, + atoi: atoi, + } + minuteDescriptor = fieldDescriptor{ + name: "minute", + min: 0, + max: 59, + defaultList: genericDefaultList[0:60], + valuePattern: `[0-9]|[1-5][0-9]`, + atoi: atoi, + } + hourDescriptor = fieldDescriptor{ + name: "hour", + min: 0, + max: 23, + defaultList: genericDefaultList[0:24], + valuePattern: `[0-9]|1[0-9]|2[0-3]`, + atoi: atoi, + } + domDescriptor = fieldDescriptor{ + name: "day-of-month", + min: 1, + max: 31, + defaultList: genericDefaultList[1:32], + valuePattern: `[1-9]|[12][0-9]|3[01]`, + atoi: atoi, + } + monthDescriptor = fieldDescriptor{ + name: "month", + min: 1, + max: 12, + defaultList: genericDefaultList[1:13], + valuePattern: `[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|january|february|march|april|march|april|june|july|august|september|october|november|december`, + atoi: func(s string) int { + return monthTokens[s] + }, + } + dowDescriptor = fieldDescriptor{ + name: "day-of-week", + min: 0, + max: 6, + defaultList: genericDefaultList[0:7], + valuePattern: `[0-7]|sun|mon|tue|wed|thu|fri|sat|sunday|monday|tuesday|wednesday|thursday|friday|saturday`, + atoi: func(s string) int { + return dowTokens[s] + }, + } + yearDescriptor = fieldDescriptor{ + name: "year", + min: 1970, + max: 2099, + defaultList: yearDefaultList[:], + valuePattern: `19[789][0-9]|20[0-9]{2}`, + atoi: atoi, + } +) + +/******************************************************************************/ + +var ( + layoutWildcard = `^\*$|^\?$` + layoutValue = `^(%value%)$` + layoutRange = `^(%value%)-(%value%)$` + layoutWildcardAndInterval = `^\*/(\d+)$` + layoutValueAndInterval = `^(%value%)/(\d+)$` + layoutRangeAndInterval = `^(%value%)-(%value%)/(\d+)$` + layoutLastDom = `^l$` + layoutWorkdom = `^(%value%)w$` + layoutLastWorkdom = `^lw$` + 
layoutDowOfLastWeek = `^(%value%)l$` + layoutDowOfSpecificWeek = `^(%value%)#([1-5])$` + fieldFinder = regexp.MustCompile(`\S+`) + entryFinder = regexp.MustCompile(`[^,]+`) + layoutRegexp = make(map[string]*regexp.Regexp) +) + +/******************************************************************************/ + +var cronNormalizer = strings.NewReplacer( + "@yearly", "0 0 0 1 1 * *", + "@annually", "0 0 0 1 1 * *", + "@monthly", "0 0 0 1 * * *", + "@weekly", "0 0 0 * * 0 *", + "@daily", "0 0 0 * * * *", + "@hourly", "0 0 * * * * *") + +/******************************************************************************/ + +func (expr *Expression) secondFieldHandler(s string) error { + var err error + expr.secondList, err = genericFieldHandler(s, secondDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) minuteFieldHandler(s string) error { + var err error + expr.minuteList, err = genericFieldHandler(s, minuteDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) hourFieldHandler(s string) error { + var err error + expr.hourList, err = genericFieldHandler(s, hourDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) monthFieldHandler(s string) error { + var err error + expr.monthList, err = genericFieldHandler(s, monthDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) yearFieldHandler(s string) error { + var err error + expr.yearList, err = genericFieldHandler(s, yearDescriptor) + return err +} + +/******************************************************************************/ + +const ( + none = 0 + one = 1 + span = 2 + all = 3 +) + +type cronDirective struct { + kind int + first int + last int + step int + sbeg int + send int +} + +func genericFieldHandler(s string, desc fieldDescriptor) ([]int, error) { + directives, err := genericFieldParse(s, desc) + if err != nil { + return nil, err + } + values := make(map[int]bool) + for _, directive := range directives { + switch directive.kind { + case none: + return nil, fmt.Errorf("syntax error in %s field: '%s'", desc.name, s[directive.sbeg:directive.send]) + case one: + populateOne(values, directive.first) + case span: + populateMany(values, directive.first, directive.last, directive.step) + case all: + return desc.defaultList, nil + } + } + return toList(values), nil +} + +func (expr *Expression) dowFieldHandler(s string) error { + expr.daysOfWeekRestricted = true + expr.daysOfWeek = make(map[int]bool) + expr.lastWeekDaysOfWeek = make(map[int]bool) + expr.specificWeekDaysOfWeek = make(map[int]bool) + + directives, err := genericFieldParse(s, dowDescriptor) + if err != nil { + return err + } + + for _, directive := range directives { + switch directive.kind { + case none: + sdirective := s[directive.sbeg:directive.send] + snormal := strings.ToLower(sdirective) + // `5L` + pairs := makeLayoutRegexp(layoutDowOfLastWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + populateOne(expr.lastWeekDaysOfWeek, dowDescriptor.atoi(snormal[pairs[2]:pairs[3]])) + } else { + // `5#3` + pairs := makeLayoutRegexp(layoutDowOfSpecificWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + populateOne(expr.specificWeekDaysOfWeek, 
(dowDescriptor.atoi(snormal[pairs[4]:pairs[5]])-1)*7+(dowDescriptor.atoi(snormal[pairs[2]:pairs[3]])%7)) + } else { + return fmt.Errorf("syntax error in day-of-week field: '%s'", sdirective) + } + } + case one: + populateOne(expr.daysOfWeek, directive.first) + case span: + populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step) + case all: + populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step) + expr.daysOfWeekRestricted = false + } + } + return nil +} + +func (expr *Expression) domFieldHandler(s string) error { + expr.daysOfMonthRestricted = true + expr.lastDayOfMonth = false + expr.lastWorkdayOfMonth = false + expr.daysOfMonth = make(map[int]bool) // days of month map + expr.workdaysOfMonth = make(map[int]bool) // work days of month map + + directives, err := genericFieldParse(s, domDescriptor) + if err != nil { + return err + } + + for _, directive := range directives { + switch directive.kind { + case none: + sdirective := s[directive.sbeg:directive.send] + snormal := strings.ToLower(sdirective) + // `L` + if makeLayoutRegexp(layoutLastDom, domDescriptor.valuePattern).MatchString(snormal) { + expr.lastDayOfMonth = true + } else { + // `LW` + if makeLayoutRegexp(layoutLastWorkdom, domDescriptor.valuePattern).MatchString(snormal) { + expr.lastWorkdayOfMonth = true + } else { + // `15W` + pairs := makeLayoutRegexp(layoutWorkdom, domDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + populateOne(expr.workdaysOfMonth, domDescriptor.atoi(snormal[pairs[2]:pairs[3]])) + } else { + return fmt.Errorf("syntax error in day-of-month field: '%s'", sdirective) + } + } + } + case one: + populateOne(expr.daysOfMonth, directive.first) + case span: + populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step) + case all: + populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step) + expr.daysOfMonthRestricted = false + } + } + return nil +} + +/******************************************************************************/ + +func populateOne(values map[int]bool, v int) { + values[v] = true +} + +func populateMany(values map[int]bool, min, max, step int) { + for i := min; i <= max; i += step { + values[i] = true + } +} + +func toList(set map[int]bool) []int { + list := make([]int, len(set)) + i := 0 + for k := range set { + list[i] = k + i += 1 + } + sort.Ints(list) + return list +} + +/******************************************************************************/ + +func genericFieldParse(s string, desc fieldDescriptor) ([]*cronDirective, error) { + // At least one entry must be present + indices := entryFinder.FindAllStringIndex(s, -1) + if len(indices) == 0 { + return nil, fmt.Errorf("%s field: missing directive", desc.name) + } + + directives := make([]*cronDirective, 0, len(indices)) + + for i := range indices { + directive := cronDirective{ + sbeg: indices[i][0], + send: indices[i][1], + } + snormal := strings.ToLower(s[indices[i][0]:indices[i][1]]) + + // `*` + if makeLayoutRegexp(layoutWildcard, desc.valuePattern).MatchString(snormal) { + directive.kind = all + directive.first = desc.min + directive.last = desc.max + directive.step = 1 + directives = append(directives, &directive) + continue + } + // `5` + if makeLayoutRegexp(layoutValue, desc.valuePattern).MatchString(snormal) { + directive.kind = one + directive.first = desc.atoi(snormal) + directives = append(directives, &directive) + continue + } + // `5-20` + pairs := makeLayoutRegexp(layoutRange, 
desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.atoi(snormal[pairs[4]:pairs[5]]) + directive.step = 1 + directives = append(directives, &directive) + continue + } + // `*/2` + pairs = makeLayoutRegexp(layoutWildcardAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.min + directive.last = desc.max + directive.step = atoi(snormal[pairs[2]:pairs[3]]) + directives = append(directives, &directive) + continue + } + // `5/2` + pairs = makeLayoutRegexp(layoutValueAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.max + directive.step = atoi(snormal[pairs[4]:pairs[5]]) + directives = append(directives, &directive) + continue + } + // `5-20/2` + pairs = makeLayoutRegexp(layoutRangeAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.atoi(snormal[pairs[4]:pairs[5]]) + directive.step = atoi(snormal[pairs[6]:pairs[7]]) + directives = append(directives, &directive) + continue + } + // No behavior for this one, let caller deal with it + directive.kind = none + directives = append(directives, &directive) + } + return directives, nil +} + +/******************************************************************************/ + +func makeLayoutRegexp(layout, value string) *regexp.Regexp { + layout = strings.Replace(layout, `%value%`, value, -1) + re := layoutRegexp[layout] + if re == nil { + re = regexp.MustCompile(layout) + layoutRegexp[layout] = re + } + return re +} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. 
“Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md new file mode 100644 index 0000000..7e64988 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/README.md @@ -0,0 +1,43 @@ +Consul API client +================= + +This package provides the `api` package which attempts to +provide programmatic access to the full Consul API. + +Currently, all of the Consul APIs included in version 0.6.0 are supported. + +Documentation +============= + +The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api) + +Usage +===== + +Below is an example of using the Consul client: + +```go +// Get a new client +client, err := api.NewClient(api.DefaultConfig()) +if err != nil { + panic(err) +} + +// Get a handle to the KV API +kv := client.KV() + +// PUT a new KV pair +p := &api.KVPair{Key: "foo", Value: []byte("test")} +_, err = kv.Put(p, nil) +if err != nil { + panic(err) +} + +// Lookup the pair +pair, _, err := kv.Get("foo", nil) +if err != nil { + panic(err) +} +fmt.Printf("KV: %v", pair) + +``` diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go new file mode 100644 index 0000000..8ec9aa5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -0,0 +1,193 @@ +package api + +import ( + "time" +) + +const ( + // ACLClientType is the client type token + ACLClientType = "client" + + // ACLManagementType is the management type token + ACLManagementType = "management" +) + +// ACLEntry is used to represent an ACL entry +type ACLEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + ID string + Name string + Type string + Rules string +} + +// ACLReplicationStatus is used to represent the status of ACL replication. +type ACLReplicationStatus struct { + Enabled bool + Running bool + SourceDatacenter string + ReplicatedIndex uint64 + LastSuccess time.Time + LastError time.Time +} + +// ACL can be used to query the ACL endpoints +type ACL struct { + c *Client +} + +// ACL returns a handle to the ACL endpoints +func (c *Client) ACL() *ACL { + return &ACL{c} +} + +// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster +// to get the first management token. 
+func (a *ACL) Bootstrap() (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/bootstrap") + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Create is used to generate a new token with the given parameters +func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Replication returns the status of the ACL replication process in the datacenter +func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/replication") + 
r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries *ACLReplicationStatus + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 0000000..b42baed --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,627 @@ +package api + +import ( + "bufio" + "fmt" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string + Definition HealthCheckDefinition +} + +// AgentService represents a service known to the agent +type AgentService struct { + ID string + Service string + Tags []string + Port int + Address string + EnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status int + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AllSegments is used to select for all segments in MembersOpts. +const AllSegments = "_all" + +// MembersOpts is used for querying member information. +type MembersOpts struct { + // WAN is whether to show members from the WAN. + WAN bool + + // Segment is the LAN segment to show members for. Setting this to the + // AllSegments value above will show members in all segments. + Segment string +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + EnableTagOverride bool `json:",omitempty"` + Meta map[string]string `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + ServiceID string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to define a node or service level check +type AgentServiceCheck struct { + CheckID string `json:",omitempty"` + Name string `json:",omitempty"` + Args []string `json:"ScriptArgs,omitempty"` + Script string `json:",omitempty"` // Deprecated, use Args. + DockerContainerID string `json:",omitempty"` + Shell string `json:",omitempty"` // Only supported for Docker. 
+ Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + Header map[string][]string `json:",omitempty"` + Method string `json:",omitempty"` + TCP string `json:",omitempty"` + Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` + GRPC string `json:",omitempty"` + GRPCUseTLS bool `json:",omitempty"` + + // In Consul 0.7 and later, checks that are associated with a service + // may also contain this optional DeregisterCriticalServiceAfter field, + // which is a timeout in the same Go time format as Interval and TTL. If + // a check is in the critical state for more than this configured value, + // then its associated service (and all of its associated checks) will + // automatically be deregistered. + DeregisterCriticalServiceAfter string `json:",omitempty"` +} +type AgentServiceChecks []*AgentServiceCheck + +// AgentToken is used when updating ACL tokens for an agent. +type AgentToken struct { + Token string +} + +// Metrics info is used to store different types of metric values from the agent. +type MetricsInfo struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +// GaugeValue stores one value that is updated as time goes on, such as +// the amount of memory allocated. +type GaugeValue struct { + Name string + Value float32 + Labels map[string]string +} + +// PointValue holds a series of points for a metric. +type PointValue struct { + Name string + Points []float32 +} + +// SampledValue stores info about a metric that is incremented over time, +// such as the number of requests to an HTTP endpoint. +type SampledValue struct { + Name string + Count int + Sum float64 + Min float64 + Max float64 + Mean float64 + Stddev float64 + Labels map[string]string +} + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Metrics is used to query the agent we are speaking to for +// its current internal metric data +func (a *Agent) Metrics() (*MetricsInfo, error) { + r := a.c.newRequest("GET", "/v1/agent/metrics") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out *MetricsInfo + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Reload triggers a configuration reload for the agent we are connected to. 
+func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. The WAN +// flag can be used to query a server for WAN members. +func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// MembersOpts returns the known gossip members and can be passed +// additional options for WAN/segment filtering. +func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + r.params.Set("segment", opts.Segment) + if opts.WAN { + r.params.Set("wan", "1") + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) PassTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state. 
+// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) WarnTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) FailTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "fail") +} + +// updateTTL is used to update the TTL of a check. This is the internal +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. +func (a *Agent) updateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// checkUpdate is the payload for a PUT for a check update. +type checkUpdate struct { + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). + Status string + + // Output is the information to post to the UI for operators as the + // output of the process that decided to hit the TTL check. This is + // different from the note field that's associated with the check + // itself. + Output string +} + +// UpdateTTL is used to update the TTL of a check. This uses the newer API +// that was introduced in Consul 0.6.4 and later. We translate the old status +// strings for compatibility (though a newer version of Consul will still be +// required to use this API). 
+func (a *Agent) UpdateTTL(checkID, output, status string) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. +func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. 
+func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream. An empty string will be sent down the given channel when there's +// nothing left to stream, after which the caller should close the stopCh. +func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + // An empty string signals to the caller that + // the scan is done, so make sure we only emit + // that when the scanner says it's done, not if + // we happen to ingest an empty line. + if text := scanner.Text(); text != "" { + logCh <- text + } else { + logCh <- " " + } + } else { + logCh <- "" + } + } + }() + + return logCh, nil +} + +// UpdateACLToken updates the agent's "acl_token". See updateToken for more +// details. +func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_token", token, q) +} + +// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken +// for more details. +func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_agent_token", token, q) +} + +// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See +// updateToken for more details. +func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_agent_master_token", token, q) +} + +// UpdateACLReplicationToken updates the agent's "acl_replication_token". See +// updateToken for more details. +func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_replication_token", token, q) +} + +// updateToken can be used to update an agent's ACL token after the agent has +// started. The tokens are not persisted, so will need to be updated again if +// the agent is restarted. 
+func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) + r.setWriteOptions(q) + r.obj = &AgentToken{Token: token} + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go new file mode 100644 index 0000000..1cdc21e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -0,0 +1,791 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" +) + +const ( + // HTTPAddrEnvName defines an environment variable name which sets + // the HTTP address if there is no -http-addr specified. + HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPCAFile defines an environment variable name which sets the + // CA file to use for talking to Consul over TLS. + HTTPCAFile = "CONSUL_CACERT" + + // HTTPCAPath defines an environment variable name which sets the + // path to a directory of CA certs to use for talking to Consul over TLS. + HTTPCAPath = "CONSUL_CAPATH" + + // HTTPClientCert defines an environment variable name which sets the + // client cert file to use for talking to Consul over TLS. + HTTPClientCert = "CONSUL_CLIENT_CERT" + + // HTTPClientKey defines an environment variable name which sets the + // client key file to use for talking to Consul over TLS. + HTTPClientKey = "CONSUL_CLIENT_KEY" + + // HTTPTLSServerName defines an environment variable name which sets the + // server name to use as the SNI host when connecting via TLS + HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. 
+ Token string + + // Near is used to provide a node name that will sort the results + // in ascending order based on the estimated round trip time from + // that node. Setting this to "_agent" will use the agent's node + // for the sort. + Near string + + // NodeMeta is used to filter results by nodes with the given + // metadata key/value pairs. Currently, only one key/value pair can + // be provided for filtering. + NodeMeta map[string]string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *QueryOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { + o2 := new(QueryOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *WriteOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { + o2 := new(WriteOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration + + // Is address translation enabled for HTTP responses on this agent + AddressTranslationEnabled bool +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // Transport is the Transport to use for the http client. + Transport *http.Transport + + // HttpClient is the client to use. Default will be + // used if not provided. 
+ HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + TLSConfig TLSConfig +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +type TLSConfig struct { + // Address is the optional address of the Consul server. The port, if any + // will be removed from here and this will be set to the ServerName of the + // resulting config. + Address string + + // CAFile is the optional path to the CA certificate used for Consul + // communication, defaults to the system bundle if not specified. + CAFile string + + // CAPath is the optional path to a directory of CA certificates to use for + // Consul communication, defaults to the system bundle if not specified. + CAPath string + + // CertFile is the optional path to the certificate for Consul + // communication. If this is set then you need to also set KeyFile. + CertFile string + + // KeyFile is the optional path to the private key for Consul communication. + // If this is set then you need to also set CertFile. + KeyFile string + + // InsecureSkipVerify if set to true will disable TLS host verification. + InsecureSkipVerify bool +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to Consul. If you have a long-lived +// client object, this is the desired behavior and should make the most efficient +// use of the connections to Consul. If you don't reuse a client object , which +// is not recommended, then you may notice idle connections building up over +// time. To avoid this, use the DefaultNonPooledConfig() instead. +func DefaultConfig() *Config { + return defaultConfig(cleanhttp.DefaultPooledTransport) +} + +// DefaultNonPooledConfig returns a default configuration for the client which +// does not pool connections. This isn't a recommended configuration because it +// will reconnect to Consul on every request, but this is useful to avoid the +// accumulation of idle connections if you make many client objects during the +// lifetime of your application. +func DefaultNonPooledConfig() *Config { + return defaultConfig(cleanhttp.DefaultTransport) +} + +// defaultConfig returns the default configuration for the client, using the +// given function to make the transport. 
+func defaultConfig(transportFn func() *http.Transport) *Config { + config := &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + Transport: transportFn(), + } + + if addr := os.Getenv(HTTPAddrEnvName); addr != "" { + config.Address = addr + } + + if token := os.Getenv(HTTPTokenEnvName); token != "" { + config.Token = token + } + + if auth := os.Getenv(HTTPAuthEnvName); auth != "" { + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &HttpBasicAuth{ + Username: username, + Password: password, + } + } + + if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { + enabled, err := strconv.ParseBool(ssl) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) + } + + if enabled { + config.Scheme = "https" + } + } + + if v := os.Getenv(HTTPTLSServerName); v != "" { + config.TLSConfig.Address = v + } + if v := os.Getenv(HTTPCAFile); v != "" { + config.TLSConfig.CAFile = v + } + if v := os.Getenv(HTTPCAPath); v != "" { + config.TLSConfig.CAPath = v + } + if v := os.Getenv(HTTPClientCert); v != "" { + config.TLSConfig.CertFile = v + } + if v := os.Getenv(HTTPClientKey); v != "" { + config.TLSConfig.KeyFile = v + } + if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { + doVerify, err := strconv.ParseBool(v) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) + } + if !doVerify { + config.TLSConfig.InsecureSkipVerify = true + } + } + + return config +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { + tlsClientConfig := &tls.Config{ + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + } + + if tlsConfig.Address != "" { + server := tlsConfig.Address + hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") + if hasPort { + var err error + server, _, err = net.SplitHostPort(server) + if err != nil { + return nil, err + } + } + tlsClientConfig.ServerName = server + } + + if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } + + if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: tlsConfig.CAFile, + CAPath: tlsConfig.CAPath, + } + if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { + return nil, err + } + } + + return tlsClientConfig, nil +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.Transport == nil { + config.Transport = defConfig.Transport + } + + if config.TLSConfig.Address == "" { + config.TLSConfig.Address = defConfig.TLSConfig.Address + } + + if config.TLSConfig.CAFile == "" { + config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile + } + + if config.TLSConfig.CAPath == "" { + config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath + } + + if config.TLSConfig.CertFile == "" { + config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile 
+ } + + if config.TLSConfig.KeyFile == "" { + config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile + } + + if !config.TLSConfig.InsecureSkipVerify { + config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify + } + + if config.HttpClient == nil { + var err error + config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) + if err != nil { + return nil, err + } + } + + parts := strings.SplitN(config.Address, "://", 2) + if len(parts) == 2 { + switch parts[0] { + case "http": + config.Scheme = "http" + case "https": + config.Scheme = "https" + case "unix": + trans := cleanhttp.DefaultTransport() + trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + config.HttpClient = &http.Client{ + Transport: trans, + } + default: + return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) + } + config.Address = parts[1] + } + + if config.Token == "" { + config.Token = defConfig.Token + } + + return &Client{config: *config}, nil +} + +// NewHttpClient returns an http client configured with the given Transport and TLS +// config. +func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { + client := &http.Client{ + Transport: transport, + } + + // TODO (slackpad) - Once we get some run time on the HTTP/2 support we + // should turn it on by default if TLS is enabled. We would basically + // just need to call http2.ConfigureTransport(transport) here. We also + // don't want to introduce another external dependency on + // golang.org/x/net/http2 at this time. For a complete recipe for how + // to enable HTTP/2 support on a transport suitable for the API client + // library see agent/http_test.go:TestHTTPServer_H2. + + if transport.TLSClientConfig == nil { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + } + + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + header http.Header + obj interface{} + ctx context.Context +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", key+":"+value) + } + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// durToMsec converts a duration to a millisecond specified string. If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. +func durToMsec(dur time.Duration) string { + ms := dur / time.Millisecond + if dur > 0 && ms == 0 { + ms = 1 + } + return fmt.Sprintf("%dms", ms) +} + +// serverError is a string we look for to detect 500 errors. 
+const serverError = "Unexpected response code: 500" + +// IsRetryableError returns true for 500 errors from the Consul servers, and +// network connection errors. These are usually retryable at a later time. +// This applies to reads but NOT to writes. This may return true for errors +// on writes that may have still gone through, so do not use this to retry +// any write operations. +func IsRetryableError(err error) bool { + if err == nil { + return false + } + + if _, ok := err.(net.Error); ok { + return true + } + + // TODO (slackpad) - Make a real error type here instead of using + // a string check. + return strings.Contains(err.Error(), serverError) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + b, err := encodeBody(r.obj) + if err != nil { + return nil, err + } + r.body = b + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + req.Header = r.header + + // Setup auth + if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + if r.ctx != nil { + return req.WithContext(r.ctx), nil + } + + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + header: make(http.Header), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.header.Set("X-Consul-Token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Since(start) + return diff, resp, err +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r := c.newRequest("GET", endpoint) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. 
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } else if _, err := ioutil.ReadAll(resp.Body); err != nil { + return nil, err + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 0000000..80ce1bc --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,200 @@ +package api + +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServiceMeta map[string]string + ServicePort int + ServiceEnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck + SkipNodeUpdate bool +} 
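+
+// Illustrative usage sketch (not part of the upstream Consul source): it shows
+// how a CatalogRegistration might be submitted through Client.Catalog().Register,
+// which is defined later in this file. The node, address, datacenter, and
+// service values below are made-up examples, and error handling is elided.
+//
+//	client, err := NewClient(DefaultConfig())
+//	if err != nil {
+//		// handle client construction error
+//	}
+//	reg := &CatalogRegistration{
+//		Node:       "external-db",
+//		Address:    "10.0.0.10",
+//		Datacenter: "dc1",
+//		Service: &AgentService{
+//			ID:      "postgres-1",
+//			Service: "postgres",
+//			Port:    5432,
+//		},
+//	}
+//	if _, err := client.Catalog().Register(reg, nil); err != nil {
+//		// handle registration error
+//	}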
+ +type CatalogDeregistration struct { + Node string + Address string // Obsolete. + Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + 
parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *CatalogNode
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go
new file mode 100644
index 0000000..53318f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/coordinate.go
@@ -0,0 +1,106 @@
+package api
+
+import (
+	"github.com/hashicorp/serf/coordinate"
+)
+
+// CoordinateEntry represents a node and its associated network coordinate.
+type CoordinateEntry struct {
+	Node    string
+	Segment string
+	Coord   *coordinate.Coordinate
+}
+
+// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
+// and area. Network coordinates are only compatible within the same area.
+type CoordinateDatacenterMap struct {
+	Datacenter  string
+	AreaID      string
+	Coordinates []CoordinateEntry
+}
+
+// Coordinate can be used to query the coordinate endpoints
+type Coordinate struct {
+	c *Client
+}
+
+// Coordinate returns a handle to the coordinate endpoints
+func (c *Client) Coordinate() *Coordinate {
+	return &Coordinate{c}
+}
+
+// Datacenters is used to return the coordinates of all the servers in the WAN
+// pool.
+func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*CoordinateDatacenterMap
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Nodes is used to return the coordinates of all the nodes in the LAN pool.
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Update inserts or updates the LAN coordinate of a node.
+func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/coordinate/update")
+	r.setWriteOptions(q)
+	r.obj = coord
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Node is used to return the coordinates of a single node in the LAN pool.
+func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
new file mode 100644
index 0000000..85b5b06
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/event.go
@@ -0,0 +1,104 @@
+package api
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+	c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+	ID            string
+	Name          string
+	Payload       []byte
+	NodeFilter    string
+	ServiceFilter string
+	TagFilter     string
+	Version       int
+	LTime         uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+	return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 { + lower := uuid[0:8] + uuid[9:13] + uuid[14:18] + upper := uuid[19:23] + uuid[24:36] + lowVal, err := strconv.ParseUint(lower, 16, 64) + if err != nil { + panic("Failed to convert " + lower) + } + highVal, err := strconv.ParseUint(upper, 16, 64) + if err != nil { + panic("Failed to convert " + upper) + } + return lowVal ^ highVal +} diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go new file mode 100644 index 0000000..53f3de4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -0,0 +1,215 @@ +package api + +import ( + "fmt" + "strings" +) + +const ( + // HealthAny is special, and is used as a wild card, + // not as a specific state. + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" + HealthMaint = "maintenance" +) + +const ( + // NodeMaint is the special key set by a node in maintenance mode. + NodeMaint = "_node_maintenance" + + // ServiceMaintPrefix is the prefix for a service in maintenance mode. + ServiceMaintPrefix = "_service_maintenance:" +) + +// HealthCheck is used to represent a single check +type HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string + ServiceTags []string + + Definition HealthCheckDefinition +} + +// HealthCheckDefinition is used to store the details about +// a health check's execution. +type HealthCheckDefinition struct { + HTTP string + Header map[string][]string + Method string + TLSSkipVerify bool + TCP string + Interval ReadableDuration + Timeout ReadableDuration + DeregisterCriticalServiceAfter ReadableDuration +} + +// HealthChecks is a collection of HealthCheck structs. +type HealthChecks []*HealthCheck + +// AggregatedStatus returns the "best" status for the list of health checks. 
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// a single string using the following heuristic:
+//
+//	maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+	var passing, warning, critical, maintenance bool
+	for _, check := range c {
+		id := string(check.CheckID)
+		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+			maintenance = true
+			continue
+		}
+
+		switch check.Status {
+		case HealthPassing:
+			passing = true
+		case HealthWarning:
+			warning = true
+		case HealthCritical:
+			critical = true
+		default:
+			return ""
+		}
+	}
+
+	switch {
+	case maintenance:
+		return HealthMaint
+	case critical:
+		return HealthCritical
+	case warning:
+		return HealthWarning
+	case passing:
+		return HealthPassing
+	default:
+		return HealthPassing
+	}
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	if passingOnly {
+		r.params.Set(HealthPassing, "1")
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { + switch state { + case HealthAny: + case HealthWarning: + case HealthCritical: + case HealthPassing: + default: + return nil, nil, fmt.Errorf("Unsupported state: %v", state) + } + r := h.c.newRequest("GET", "/v1/health/state/"+state) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out HealthChecks + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go new file mode 100644 index 0000000..97f5156 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -0,0 +1,420 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +// KVPair is used to represent a single K/V entry +type KVPair struct { + // Key is the name of the key. It is also part of the URL path when accessed + // via the API. + Key string + + // CreateIndex holds the index corresponding the creation of this KVPair. This + // is a read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 + + // LockIndex holds the index corresponding to a lock on this key, if any. This + // is a read-only field. + LockIndex uint64 + + // Flags are any user-defined flags on the key. It is up to the implementer + // to check these values, since Consul does not treat them specially. + Flags uint64 + + // Value is the value for the key. This can be any value, but it will be + // base64 encoded upon transport. + Value []byte + + // Session is a string representing the ID of the session. Any other + // interactions with this key over the same session must specify the same + // session ID. + Session string +} + +// KVPairs is a list of KVPair objects +type KVPairs []*KVPair + +// KVOp constants give possible operations available in a KVTxn. +type KVOp string + +const ( + KVSet KVOp = "set" + KVDelete KVOp = "delete" + KVDeleteCAS KVOp = "delete-cas" + KVDeleteTree KVOp = "delete-tree" + KVCAS KVOp = "cas" + KVLock KVOp = "lock" + KVUnlock KVOp = "unlock" + KVGet KVOp = "get" + KVGetTree KVOp = "get-tree" + KVCheckSession KVOp = "check-session" + KVCheckIndex KVOp = "check-index" + KVCheckNotExists KVOp = "check-not-exists" +) + +// KVTxnOp defines a single operation inside a transaction. +type KVTxnOp struct { + Verb KVOp + Key string + Value []byte + Flags uint64 + Index uint64 + Session string +} + +// KVTxnOps defines a set of operations to be performed inside a single +// transaction. +type KVTxnOps []*KVTxnOp + +// KVTxnResponse has the outcome of a transaction. +type KVTxnResponse struct { + Results []*KVPair + Errors TxnErrors +} + +// KV is used to manipulate the K/V API +type KV struct { + c *Client +} + +// KV is used to return a handle to the K/V apis +func (c *Client) KV() *KV { + return &KV{c} +} + +// Get is used to lookup a single key. The returned pointer +// to the KVPair will be nil if the key does not exist. 
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { + resp, qm, err := k.getInternal(key, nil, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to lookup all keys under a prefix +func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { + resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Keys is used to list all the keys under a prefix. Optionally, +// a separator can be used to limit the responses. +func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { + params := map[string]string{"keys": ""} + if separator != "" { + params["separator"] = separator + } + resp, qm, err := k.getInternal(prefix, params, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []string + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { + r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setQueryOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + resp.Body.Close() + return nil, qm, nil + } else if resp.StatusCode != 200 { + resp.Body.Close() + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + return resp, qm, nil +} + +// Put is used to write a new value. Only the +// Key, Flags and Value is respected. +func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { + params := make(map[string]string, 1) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + _, wm, err := k.put(p.Key, params, p.Value, q) + return wm, err +} + +// CAS is used for a Check-And-Set operation. The Key, +// ModifyIndex, Flags and Value are respected. Returns true +// on success or false on failures. +func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) + return k.put(p.Key, params, p.Value, q) +} + +// Acquire is used for a lock acquisition operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. +func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["acquire"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +// Release is used for a lock release operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. 
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + if len(key) > 0 && key[0] == '/' { + return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) + } + + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// TxnOp is the internal format we send to Consul. It's not specific to KV, +// though currently only KV operations are supported. +type TxnOp struct { + KV *KVTxnOp +} + +// TxnOps is a list of transaction operations. +type TxnOps []*TxnOp + +// TxnResult is the internal format we receive from Consul. +type TxnResult struct { + KV *KVPair +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// Txn is used to apply multiple KV operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. 
Transactions are defined as a +// list of operations to perform, using the KVOp constants and KVTxnOp structure +// to define operations. If any operation fails, none of the changes are applied +// to the state store. Note that this hides the internal raw transaction interface +// and munges the input and output types into KV-specific ones for ease of use. +// If there are more non-KV operations in the future we may break out a new +// transaction API client, but it will be easy to keep this KV-specific variant +// supported. +// +// Even though this is generally a write operation, we take a QueryOptions input +// and return a QueryMeta output. If the transaction contains only read ops, then +// Consul will fast-path it to a different endpoint internally which supports +// consistency controls, but not blocking. If there are write operations then +// the request will always be routed through raft and any consistency settings +// will be ignored. +// +// Here's an example: +// +// ops := KVTxnOps{ +// &KVTxnOp{ +// Verb: KVLock, +// Key: "test/lock", +// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", +// Value: []byte("hello"), +// }, +// &KVTxnOp{ +// Verb: KVGet, +// Key: "another/key", +// }, +// } +// ok, response, _, err := kv.Txn(&ops, nil) +// +// If there is a problem making the transaction request then an error will be +// returned. Otherwise, the ok value will be true if the transaction succeeded +// or false if it was rolled back. The response is a structured return value which +// will have the outcome of the transaction. Its Results member will have entries +// for each operation. Deleted keys will have a nil entry in the, and to save +// space, the Value of each key in the Results will be nil unless the operation +// is a KVGet. If the transaction was rolled back, the Errors member will have +// entries referencing the index of the operation that failed along with an error +// message. +func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { + r := k.c.newRequest("PUT", "/v1/txn") + r.setQueryOptions(q) + + // Convert into the internal format since this is an all-KV txn. + ops := make(TxnOps, 0, len(txn)) + for _, kvOp := range txn { + ops = append(ops, &TxnOp{KV: kvOp}) + } + r.obj = ops + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return false, nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { + var txnResp TxnResponse + if err := decodeBody(resp, &txnResp); err != nil { + return false, nil, nil, err + } + + // Convert from the internal format. 
+ kvResp := KVTxnResponse{ + Errors: txnResp.Errors, + } + for _, result := range txnResp.Results { + kvResp.Results = append(kvResp.Results, result.KV) + } + return resp.StatusCode == http.StatusOK, &kvResp, qm, nil + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) +} diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go new file mode 100644 index 0000000..41f72e7 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -0,0 +1,385 @@ +package api + +import ( + "fmt" + "sync" + "time" +) + +const ( + // DefaultLockSessionName is the Session Name we assign if none is provided + DefaultLockSessionName = "Consul API Lock" + + // DefaultLockSessionTTL is the default session TTL if no Session is provided + // when creating a new Lock. This is used because we do not have another + // other check to depend upon. + DefaultLockSessionTTL = "15s" + + // DefaultLockWaitTime is how long we block for at a time to check if lock + // acquisition is possible. This affects the minimum time it takes to cancel + // a Lock acquisition. + DefaultLockWaitTime = 15 * time.Second + + // DefaultLockRetryTime is how long we wait after a failed lock acquisition + // before attempting to do the lock again. This is so that once a lock-delay + // is in effect, we do not hot loop retrying the acquisition. + DefaultLockRetryTime = 5 * time.Second + + // DefaultMonitorRetryTime is how long we wait after a failed monitor check + // of a lock (500 response code). This allows the monitor to ride out brief + // periods of unavailability, subject to the MonitorRetries setting in the + // lock options which is by default set to 0, disabling this feature. This + // affects locks and semaphores. + DefaultMonitorRetryTime = 2 * time.Second + + // LockFlagValue is a magic flag we set to indicate a key + // is being used for a lock. It is used to detect a potential + // conflict with a semaphore. + LockFlagValue = 0x2ddccbc058a50c18 +) + +var ( + // ErrLockHeld is returned if we attempt to double lock + ErrLockHeld = fmt.Errorf("Lock already held") + + // ErrLockNotHeld is returned if we attempt to unlock a lock + // that we do not hold. + ErrLockNotHeld = fmt.Errorf("Lock not held") + + // ErrLockInUse is returned if we attempt to destroy a lock + // that is in use. + ErrLockInUse = fmt.Errorf("Lock in use") + + // ErrLockConflict is returned if the flags on a key + // used for a lock do not match expectation + ErrLockConflict = fmt.Errorf("Existing key does not match lock use") +) + +// Lock is used to implement client-side leader election. It is follows the +// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html. +type Lock struct { + c *Client + opts *LockOptions + + isHeld bool + sessionRenew chan struct{} + lockSession string + l sync.Mutex +} + +// LockOptions is used to parameterize the Lock behavior. 
+type LockOptions struct { + Key string // Must be set and have write permissions + Value []byte // Optional, value to associate with the lock + Session string // Optional, created if not specified + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever +} + +// LockKey returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockKey(key string) (*Lock, error) { + opts := &LockOptions{ + Key: key, + } + return c.LockOpts(opts) +} + +// LockOpts returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { + if opts.Key == "" { + return nil, fmt.Errorf("missing key") + } + if opts.SessionName == "" { + opts.SessionName = DefaultLockSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultLockSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } + l := &Lock{ + c: c, + opts: opts, + } + return l, nil +} + +// Lock attempts to acquire the lock and blocks while doing so. +// Providing a non-nil stopCh can be used to abort the lock attempt. +// Returns a channel that is closed if our lock is lost or an error. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the lock is held until Unlock() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the lock being lost. +func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return nil, ErrLockHeld + } + + // Check if we need to create a session first + l.lockSession = l.opts.Session + if l.lockSession == "" { + s, err := l.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() + } + + // Setup the query options + kv := l.c.KV() + qOpts := &QueryOptions{ + WaitTime: l.opts.LockWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. 
+ if l.opts.LockTryOnce && attempts > 0 { + elapsed := time.Since(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Look for an existing lock, blocking until not taken + pair, meta, err := kv.Get(l.opts.Key, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read lock: %v", err) + } + if pair != nil && pair.Flags != LockFlagValue { + return nil, ErrLockConflict + } + locked := false + if pair != nil && pair.Session == l.lockSession { + goto HELD + } + if pair != nil && pair.Session != "" { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Try to acquire the lock + pair = l.lockEntry(l.lockSession) + locked, _, err = kv.Acquire(pair, nil) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock: %v", err) + } + + // Handle the case of not getting the lock + if !locked { + // Determine why the lock failed + qOpts.WaitIndex = 0 + pair, meta, err = kv.Get(l.opts.Key, qOpts) + if pair != nil && pair.Session != "" { + //If the session is not null, this means that a wait can safely happen + //using a long poll + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } else { + // If the session is empty and the lock failed to acquire, then it means + // a lock-delay is in effect and a timed wait must be used + select { + case <-time.After(DefaultLockRetryTime): + goto WAIT + case <-stopCh: + return nil, nil + } + } + } + +HELD: + // Watch to ensure we maintain leadership + leaderCh := make(chan struct{}) + go l.monitorLock(l.lockSession, leaderCh) + + // Set that we own the lock + l.isHeld = true + + // Locked! All done + return leaderCh, nil +} + +// Unlock released the lock. It is an error to call this +// if the lock is not currently held. +func (l *Lock) Unlock() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Ensure the lock is actually held + if !l.isHeld { + return ErrLockNotHeld + } + + // Set that we no longer own the lock + l.isHeld = false + + // Stop the session renew + if l.sessionRenew != nil { + defer func() { + close(l.sessionRenew) + l.sessionRenew = nil + }() + } + + // Get the lock entry, and clear the lock session + lockEnt := l.lockEntry(l.lockSession) + l.lockSession = "" + + // Release the lock explicitly + kv := l.c.KV() + _, _, err := kv.Release(lockEnt, nil) + if err != nil { + return fmt.Errorf("failed to release lock: %v", err) + } + return nil +} + +// Destroy is used to cleanup the lock entry. It is not necessary +// to invoke. It will fail if the lock is in use. 
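// ---------------------------------------------------------------------------
// Editor's aside: a minimal, illustrative leader-election sketch (not part of
// the vendored source) using LockKey/Lock/Unlock as documented above. The key
// "service/myapp/leader" is a placeholder, and real callers must keep watching
// the returned channel, since the lock can be lost at any time.
// ---------------------------------------------------------------------------

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	lock, err := client.LockKey("service/myapp/leader")
	if err != nil {
		log.Fatal(err)
	}

	// Lock blocks until acquisition succeeds or stopCh is closed; a nil
	// channel together with a nil error means the attempt was aborted.
	lostCh, err := lock.Lock(nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("became leader")

	select {
	case <-lostCh:
		// The session was invalidated or the key changed hands; step down.
		log.Println("leadership lost")
	default:
		// Still leader; release voluntarily when the work is done.
		if err := lock.Unlock(); err != nil {
			log.Fatal(err)
		}
	}
}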
+func (l *Lock) Destroy() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return ErrLockHeld + } + + // Look for an existing lock + kv := l.c.KV() + pair, _, err := kv.Get(l.opts.Key, nil) + if err != nil { + return fmt.Errorf("failed to read lock: %v", err) + } + + // Nothing to do if the lock does not exist + if pair == nil { + return nil + } + + // Check for possible flag conflict + if pair.Flags != LockFlagValue { + return ErrLockConflict + } + + // Check if it is in use + if pair.Session != "" { + return ErrLockInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(pair, nil) + if err != nil { + return fmt.Errorf("failed to remove lock: %v", err) + } + if !didRemove { + return ErrLockInUse + } + return nil +} + +// createSession is used to create a new managed session +func (l *Lock) createSession() (string, error) { + session := l.c.Session() + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// lockEntry returns a formatted KVPair for the lock +func (l *Lock) lockEntry(session string) *KVPair { + return &KVPair{ + Key: l.opts.Key, + Value: l.opts.Value, + Session: session, + Flags: LockFlagValue, + } +} + +// monitorLock is a long running routine to monitor a lock ownership +// It closes the stopCh if we lose our leadership. +func (l *Lock) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := l.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := l.opts.MonitorRetries +RETRY: + pair, meta, err := kv.Get(l.opts.Key, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go new file mode 100644 index 0000000..079e224 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -0,0 +1,11 @@ +package api + +// Operator can be used to perform low-level operator tasks for Consul. +type Operator struct { + c *Client +} + +// Operator returns a handle to the operator endpoints. +func (c *Client) Operator() *Operator { + return &Operator{c} +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go new file mode 100644 index 0000000..a630b69 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -0,0 +1,193 @@ +// The /v1/operator/area endpoints are available only in Consul Enterprise and +// interact with its network area subsystem. Network areas are used to link +// together Consul servers in different Consul datacenters. With network areas, +// Consul datacenters can be linked together in ways other than a fully-connected +// mesh, as is required for Consul's WAN. +package api + +import ( + "net" + "time" +) + +// Area defines a network area. 
+type Area struct { + // ID is this identifier for an area (a UUID). This must be left empty + // when creating a new area. + ID string + + // PeerDatacenter is the peer Consul datacenter that will make up the + // other side of this network area. Network areas always involve a pair + // of datacenters: the datacenter where the area was created, and the + // peer datacenter. This is required. + PeerDatacenter string + + // RetryJoin specifies the address of Consul servers to join to, such as + // an IPs or hostnames with an optional port number. This is optional. + RetryJoin []string + + // UseTLS specifies whether gossip over this area should be encrypted with TLS + // if possible. + UseTLS bool +} + +// AreaJoinResponse is returned when a join occurs and gives the result for each +// address. +type AreaJoinResponse struct { + // The address that was joined. + Address string + + // Whether or not the join was a success. + Joined bool + + // If we couldn't join, this is the message with information. + Error string +} + +// SerfMember is a generic structure for reporting information about members in +// a Serf cluster. This is only used by the area endpoints right now, but this +// could be expanded to other endpoints in the future. +type SerfMember struct { + // ID is the node identifier (a UUID). + ID string + + // Name is the node name. + Name string + + // Addr has the IP address. + Addr net.IP + + // Port is the RPC port. + Port uint16 + + // Datacenter is the DC name. + Datacenter string + + // Role is "client", "server", or "unknown". + Role string + + // Build has the version of the Consul agent. + Build string + + // Protocol is the protocol of the Consul agent. + Protocol int + + // Status is the Serf health status "none", "alive", "leaving", "left", + // or "failed". + Status string + + // RTT is the estimated round trip time from the server handling the + // request to the this member. This will be negative if no RTT estimate + // is available. + RTT time.Duration +} + +// AreaCreate will create a new network area. The ID in the given structure must +// be empty and a generated ID will be returned on success. +func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { + r := op.c.newRequest("POST", "/v1/operator/area") + r.setWriteOptions(q) + r.obj = area + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// AreaUpdate will update the configuration of the network area with the given ID. +func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + r.obj = area + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// AreaGet returns a single network area. 
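// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch (not part of the vendored source) of
// creating and joining a network area via the operator client documented
// above. The /v1/operator/area endpoints exist only in Consul Enterprise, and
// the peer datacenter name and server addresses below are placeholders.
// ---------------------------------------------------------------------------

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Create an area linking this datacenter to "dc2"; the ID is generated.
	areaID, _, err := op.AreaCreate(&api.Area{
		PeerDatacenter: "dc2",
		RetryJoin:      []string{"consul-dc2.example.com:8300"},
		UseTLS:         true,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Join an additional server and report the per-address outcome.
	results, _, err := op.AreaJoin(areaID, []string{"10.0.0.10:8300"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		log.Printf("join %s: joined=%v err=%s", r.Address, r.Joined, r.Error)
	}
}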
+func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaList returns all the available network areas. +func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaDelete deletes the given network area. +func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { + r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// AreaJoin attempts to join the given set of join addresses to the given +// network area. See the Area structure for details about join addresses. +func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") + r.setWriteOptions(q) + r.obj = addresses + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out []*AreaJoinResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, wm, nil +} + +// AreaMembers lists the Serf information about the members in the given area. +func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { + var out []*SerfMember + qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go new file mode 100644 index 0000000..b179406 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -0,0 +1,219 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// AutopilotConfiguration is used for querying/setting the Autopilot configuration. +// Autopilot helps manage operator tasks related to Consul servers like removing +// failed servers from the Raft quorum. +type AutopilotConfiguration struct { + // CleanupDeadServers controls whether to remove dead servers from the Raft + // peer list when a new server joins + CleanupDeadServers bool + + // LastContactThreshold is the limit on the amount of time a server can go + // without leader contact before being considered unhealthy. + LastContactThreshold *ReadableDuration + + // MaxTrailingLogs is the amount of entries in the Raft Log that a server can + // be behind before being considered unhealthy. + MaxTrailingLogs uint64 + + // ServerStabilizationTime is the minimum amount of time a server must be + // in a stable, healthy state before it can be added to the cluster. Only + // applicable with Raft protocol version 3 or higher. + ServerStabilizationTime *ReadableDuration + + // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating + // servers into zones for redundancy. If left blank, this feature will be disabled. 
+ RedundancyZoneTag string + + // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration + // strategy of waiting until enough newer-versioned servers have been added to the + // cluster before promoting them to voters. + DisableUpgradeMigration bool + + // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when + // performing upgrade migrations. If left blank, the Consul version will be used. + UpgradeVersionTag string + + // CreateIndex holds the index corresponding the creation of this configuration. + // This is a read-only field. + CreateIndex uint64 + + // ModifyIndex will be set to the index of the last update when retrieving the + // Autopilot configuration. Resubmitting a configuration with + // AutopilotCASConfiguration will perform a check-and-set operation which ensures + // there hasn't been a subsequent update since the configuration was retrieved. + ModifyIndex uint64 +} + +// ServerHealth is the health (from the leader's point of view) of a server. +type ServerHealth struct { + // ID is the raft ID of the server. + ID string + + // Name is the node name of the server. + Name string + + // Address is the address of the server. + Address string + + // The status of the SerfHealth check for the server. + SerfStatus string + + // Version is the Consul version of the server. + Version string + + // Leader is whether this server is currently the leader. + Leader bool + + // LastContact is the time since this node's last contact with the leader. + LastContact *ReadableDuration + + // LastTerm is the highest leader term this server has a record of in its Raft log. + LastTerm uint64 + + // LastIndex is the last log index this server has a record of in its Raft log. + LastIndex uint64 + + // Healthy is whether or not the server is healthy according to the current + // Autopilot config. + Healthy bool + + // Voter is whether this is a voting server. + Voter bool + + // StableSince is the last time this server's Healthy value changed. + StableSince time.Time +} + +// OperatorHealthReply is a representation of the overall health of the cluster +type OperatorHealthReply struct { + // Healthy is true if all the servers in the cluster are healthy. + Healthy bool + + // FailureTolerance is the number of healthy servers that could be lost without + // an outage occurring. + FailureTolerance int + + // Servers holds the health of each server. + Servers []ServerHealth +} + +// ReadableDuration is a duration type that is serialized to JSON in human readable format. 
+type ReadableDuration time.Duration + +func NewReadableDuration(dur time.Duration) *ReadableDuration { + d := ReadableDuration(dur) + return &d +} + +func (d *ReadableDuration) String() string { + return d.Duration().String() +} + +func (d *ReadableDuration) Duration() time.Duration { + if d == nil { + return time.Duration(0) + } + return time.Duration(*d) +} + +func (d *ReadableDuration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil +} + +func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { + if d == nil { + return fmt.Errorf("cannot unmarshal to nil pointer") + } + + str := string(raw) + if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { + return fmt.Errorf("must be enclosed with quotes: %s", str) + } + dur, err := time.ParseDuration(str[1 : len(str)-1]) + if err != nil { + return err + } + *d = ReadableDuration(dur) + return nil +} + +// AutopilotGetConfiguration is used to query the current Autopilot configuration. +func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AutopilotConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return &out, nil +} + +// AutopilotSetConfiguration is used to set the current Autopilot configuration. +func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") + r.setWriteOptions(q) + r.obj = conf + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// AutopilotCASConfiguration is used to perform a Check-And-Set update on the +// Autopilot configuration. The ModifyIndex value will be respected. Returns +// true on success or false on failures. 
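// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch (not part of the vendored source) of
// the read-modify-write pattern for the Autopilot configuration, using the
// check-and-set variant described just above so that a concurrent update
// between the read and the write is detected rather than silently overwritten.
// The threshold value chosen below is a placeholder.
// ---------------------------------------------------------------------------

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Read the current configuration; its ModifyIndex is carried along for CAS.
	conf, err := op.AutopilotGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Adjust settings; ReadableDuration wraps time.Duration for JSON encoding.
	conf.CleanupDeadServers = true
	conf.LastContactThreshold = api.NewReadableDuration(200 * time.Millisecond)

	ok, err := op.AutopilotCASConfiguration(conf, nil)
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		log.Println("configuration changed since it was read; re-read and retry")
	}
}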
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) { + r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") + r.setWriteOptions(q) + r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10)) + r.obj = conf + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + + return res, nil +} + +// AutopilotServerHealth +func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/health") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out OperatorHealthReply + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go new file mode 100644 index 0000000..6b61429 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go @@ -0,0 +1,86 @@ +package api + +// keyringRequest is used for performing Keyring operations +type keyringRequest struct { + Key string +} + +// KeyringResponse is returned when listing the gossip encryption keys +type KeyringResponse struct { + // Whether this response is for a WAN ring + WAN bool + + // The datacenter name this request corresponds to + Datacenter string + + // Segment has the network segment this request corresponds to. + Segment string + + // A map of the encryption keys to the number of nodes they're installed on + Keys map[string]int + + // The total number of nodes in this ring + NumNodes int +} + +// KeyringInstall is used to install a new gossip encryption key into the cluster +func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { + r := op.c.newRequest("POST", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// KeyringList is used to list the gossip keys installed in the cluster +func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { + r := op.c.newRequest("GET", "/v1/operator/keyring") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*KeyringResponse + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// KeyringRemove is used to remove a gossip encryption key from the cluster +func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// KeyringUse is used to change the active gossip encryption key +func (op *Operator) KeyringUse(key string, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + 
return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go new file mode 100644 index 0000000..a9844df --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -0,0 +1,89 @@ +package api + +// RaftServer has information about a server in the Raft configuration. +type RaftServer struct { + // ID is the unique ID for the server. These are currently the same + // as the address, but they will be changed to a real GUID in a future + // release of Consul. + ID string + + // Node is the node name of the server, as known by Consul, or this + // will be set to "(unknown)" otherwise. + Node string + + // Address is the IP:port of the server, used for Raft communications. + Address string + + // Leader is true if this server is the current cluster leader. + Leader bool + + // Protocol version is the raft protocol version used by the server + ProtocolVersion string + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online, or if + // it's a non-voting server, which will be added in a future release of + // Consul. + Voter bool +} + +// RaftConfiguration is returned when querying for the current Raft configuration. +type RaftConfiguration struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer + + // Index has the Raft index of this configuration. + Index uint64 +} + +// RaftGetConfiguration is used to query the current Raft peer set. +func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/raft/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out RaftConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} + +// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft +// quorum but no longer known to Serf or the catalog) by address in the form of +// "IP:port". +func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") + r.setWriteOptions(q) + + r.params.Set("address", string(address)) + + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + + resp.Body.Close() + return nil +} + +// RaftRemovePeerByID is used to kick a stale peer (one that it in the Raft +// quorum but no longer known to Serf or the catalog) by ID. +func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") + r.setWriteOptions(q) + + r.params.Set("id", string(id)) + + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go new file mode 100644 index 0000000..92b05d3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go @@ -0,0 +1,11 @@ +package api + +// SegmentList returns all the available LAN segments. 
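// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch (not part of the vendored source) of
// rotating the gossip encryption key with the keyring endpoints above,
// followed by a read of the current Raft peer set. The key strings are
// placeholders; real keys are base64-encoded (e.g. from `consul keygen`), and
// the old key should only be removed once every node has the new key
// installed and in use.
// ---------------------------------------------------------------------------

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	newKey := "REPLACE_WITH_BASE64_KEY" // placeholder, e.g. output of `consul keygen`
	oldKey := "REPLACE_WITH_OLD_KEY"    // placeholder

	// Install the new key everywhere, switch to it, then retire the old key.
	if err := op.KeyringInstall(newKey, nil); err != nil {
		log.Fatal(err)
	}
	if err := op.KeyringUse(newKey, nil); err != nil {
		log.Fatal(err)
	}
	if err := op.KeyringRemove(oldKey, nil); err != nil {
		log.Fatal(err)
	}

	// Also served by the operator client: inspect the current Raft configuration.
	raft, err := op.RaftGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range raft.Servers {
		log.Printf("%s %s leader=%v voter=%v", s.Node, s.Address, s.Leader, s.Voter)
	}
}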
+func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) { + var out []string + qm, err := op.c.query("/v1/operator/segment", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go new file mode 100644 index 0000000..d322dd8 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -0,0 +1,204 @@ +package api + +// QueryDatacenterOptions sets options about how we fail over if there are no +// healthy nodes in the local datacenter. +type QueryDatacenterOptions struct { + // NearestN is set to the number of remote datacenters to try, based on + // network coordinates. + NearestN int + + // Datacenters is a fixed list of datacenters to try after NearestN. We + // never try a datacenter multiple times, so those are subtracted from + // this list before proceeding. + Datacenters []string +} + +// QueryDNSOptions controls settings when query results are served over DNS. +type QueryDNSOptions struct { + // TTL is the time to live for the served DNS results. + TTL string +} + +// ServiceQuery is used to query for a set of healthy nodes offering a specific +// service. +type ServiceQuery struct { + // Service is the service to query. + Service string + + // Near allows baking in the name of a node to automatically distance- + // sort from. The magic "_agent" value is supported, which sorts near + // the agent which initiated the request by default. + Near string + + // Failover controls what we do if there are no healthy nodes in the + // local datacenter. + Failover QueryDatacenterOptions + + // IgnoreCheckIDs is an optional list of health check IDs to ignore when + // considering which nodes are healthy. It is useful as an emergency measure + // to temporarily override some health check that is producing false negatives + // for example. + IgnoreCheckIDs []string + + // If OnlyPassing is true then we will only include nodes with passing + // health checks (critical AND warning checks will cause a node to be + // discarded) + OnlyPassing bool + + // Tags are a set of required and/or disallowed tags. If a tag is in + // this list it must be present. If the tag is preceded with "!" then + // it is disallowed. + Tags []string + + // NodeMeta is a map of required node metadata fields. If a key/value + // pair is in this map it must be present on the node in order for the + // service entry to be returned. + NodeMeta map[string]string +} + +// QueryTemplate carries the arguments for creating a templated query. +type QueryTemplate struct { + // Type specifies the type of the query template. Currently only + // "name_prefix_match" is supported. This field is required. + Type string + + // Regexp allows specifying a regex pattern to match against the name + // of the query being executed. + Regexp string +} + +// PreparedQueryDefinition defines a complete prepared query. +type PreparedQueryDefinition struct { + // ID is this UUID-based ID for the query, always generated by Consul. + ID string + + // Name is an optional friendly name for the query supplied by the + // user. NOTE - if this feature is used then it will reduce the security + // of any read ACL associated with this query/service since this name + // can be used to locate nodes with supplying any ACL. + Name string + + // Session is an optional session to tie this query's lifetime to. If + // this is omitted then the query will not expire. 
+ Session string + + // Token is the ACL token used when the query was created, and it is + // used when a query is subsequently executed. This token, or a token + // with management privileges, must be used to change the query later. + Token string + + // Service defines a service query (leaving things open for other types + // later). + Service ServiceQuery + + // DNS has options that control how the results of this query are + // served over DNS. + DNS QueryDNSOptions + + // Template is used to pass through the arguments for creating a + // prepared query with an attached template. If a template is given, + // interpolations are possible in other struct fields. + Template QueryTemplate +} + +// PreparedQueryExecuteResponse has the results of executing a query. +type PreparedQueryExecuteResponse struct { + // Service is the service that was queried. + Service string + + // Nodes has the nodes that were output by the query. + Nodes []ServiceEntry + + // DNS has the options for serving these results over DNS. + DNS QueryDNSOptions + + // Datacenter is the datacenter that these results came from. + Datacenter string + + // Failovers is a count of how many times we had to query a remote + // datacenter. + Failovers int +} + +// PreparedQuery can be used to query the prepared query endpoints. +type PreparedQuery struct { + c *Client +} + +// PreparedQuery returns a handle to the prepared query endpoints. +func (c *Client) PreparedQuery() *PreparedQuery { + return &PreparedQuery{c} +} + +// Create makes a new prepared query. The ID of the new query is returned. +func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/query") + r.setWriteOptions(q) + r.obj = query + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update makes updates to an existing prepared query. +func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { + return c.c.write("/v1/query/"+query.ID, query, nil, q) +} + +// List is used to fetch all the prepared queries (always requires a management +// token). +func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Get is used to fetch a specific prepared query. +func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query/"+queryID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Delete is used to delete a specific prepared query. +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("DELETE", "/v1/query/"+queryID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// Execute is used to execute a specific prepared query. You can execute using +// a query ID or name. 
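// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch (not part of the vendored source)
// that registers a prepared query for the healthy instances of a service and
// then runs it by the returned ID, using the Execute helper that follows
// below. The service name "web" and the failover settings are placeholders.
// ---------------------------------------------------------------------------

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	pq := client.PreparedQuery()

	// Create returns the generated UUID for the new query.
	id, _, err := pq.Create(&api.PreparedQueryDefinition{
		Name: "web-healthy",
		Service: api.ServiceQuery{
			Service:     "web",
			OnlyPassing: true,
			Failover:    api.QueryDatacenterOptions{NearestN: 2},
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Execute accepts either the query ID or its name.
	resp, _, err := pq.Execute(id, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d nodes from datacenter %s (failovers=%d)",
		len(resp.Nodes), resp.Datacenter, resp.Failovers)
}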
+func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { + var out *PreparedQueryExecuteResponse + qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go new file mode 100644 index 0000000..745a208 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/raw.go @@ -0,0 +1,24 @@ +package api + +// Raw can be used to do raw queries against custom endpoints +type Raw struct { + c *Client +} + +// Raw returns a handle to query endpoints +func (c *Client) Raw() *Raw { + return &Raw{c} +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + return raw.c.query(endpoint, out, q) +} + +// Write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. +func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + return raw.c.write(endpoint, in, out, q) +} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go new file mode 100644 index 0000000..d0c5741 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/semaphore.go @@ -0,0 +1,513 @@ +package api + +import ( + "encoding/json" + "fmt" + "path" + "sync" + "time" +) + +const ( + // DefaultSemaphoreSessionName is the Session Name we assign if none is provided + DefaultSemaphoreSessionName = "Consul API Semaphore" + + // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided + // when creating a new Semaphore. This is used because we do not have another + // other check to depend upon. + DefaultSemaphoreSessionTTL = "15s" + + // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore + // acquisition is possible. This affects the minimum time it takes to cancel + // a Semaphore acquisition. + DefaultSemaphoreWaitTime = 15 * time.Second + + // DefaultSemaphoreKey is the key used within the prefix to + // use for coordination between all the contenders. + DefaultSemaphoreKey = ".lock" + + // SemaphoreFlagValue is a magic flag we set to indicate a key + // is being used for a semaphore. It is used to detect a potential + // conflict with a lock. + SemaphoreFlagValue = 0xe0f69a2baa414de0 +) + +var ( + // ErrSemaphoreHeld is returned if we attempt to double lock + ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") + + // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore + // that we do not hold. + ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") + + // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore + // that is in use. + ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") + + // ErrSemaphoreConflict is returned if the flags on a key + // used for a semaphore do not match expectation + ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") +) + +// Semaphore is used to implement a distributed semaphore +// using the Consul KV primitives. 
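// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch (not part of the vendored source) of
// the Raw client defined in raw.go above, which lets callers hit endpoints
// that have no typed wrapper. The /v1/agent/self endpoint and the loosely
// typed map used for decoding are illustrative choices only.
// ---------------------------------------------------------------------------

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Query performs a GET and JSON-decodes the response body into out.
	var out map[string]interface{}
	if _, err := client.Raw().Query("/v1/agent/self", &out, nil); err != nil {
		log.Fatal(err)
	}
	log.Printf("agent/self returned %d top-level keys", len(out))
}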
+type Semaphore struct { + c *Client + opts *SemaphoreOptions + + isHeld bool + sessionRenew chan struct{} + lockSession string + l sync.Mutex +} + +// SemaphoreOptions is used to parameterize the Semaphore +type SemaphoreOptions struct { + Prefix string // Must be set and have write permissions + Limit int // Must be set, and be positive + Value []byte // Optional, value to associate with the contender entry + Session string // Optional, created if not specified + SessionName string // Optional, defaults to DefaultLockSessionName + SessionTTL string // Optional, defaults to DefaultLockSessionTTL + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime + SemaphoreTryOnce bool // Optional, defaults to false which means try forever +} + +// semaphoreLock is written under the DefaultSemaphoreKey and +// is used to coordinate between all the contenders. +type semaphoreLock struct { + // Limit is the integer limit of holders. This is used to + // verify that all the holders agree on the value. + Limit int + + // Holders is a list of all the semaphore holders. + // It maps the session ID to true. It is used as a set effectively. + Holders map[string]bool +} + +// SemaphorePrefix is used to created a Semaphore which will operate +// at the given KV prefix and uses the given limit for the semaphore. +// The prefix must have write privileges, and the limit must be agreed +// upon by all contenders. +func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { + opts := &SemaphoreOptions{ + Prefix: prefix, + Limit: limit, + } + return c.SemaphoreOpts(opts) +} + +// SemaphoreOpts is used to create a Semaphore with the given options. +// The prefix must have write privileges, and the limit must be agreed +// upon by all contenders. If a Session is not provided, one will be created. +func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { + if opts.Prefix == "" { + return nil, fmt.Errorf("missing prefix") + } + if opts.Limit <= 0 { + return nil, fmt.Errorf("semaphore limit must be positive") + } + if opts.SessionName == "" { + opts.SessionName = DefaultSemaphoreSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultSemaphoreSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.SemaphoreWaitTime == 0 { + opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime + } + s := &Semaphore{ + c: c, + opts: opts, + } + return s, nil +} + +// Acquire attempts to reserve a slot in the semaphore, blocking until +// success, interrupted via the stopCh or an error is encountered. +// Providing a non-nil stopCh can be used to abort the attempt. +// On success, a channel is returned that represents our slot. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the slot is held until Release() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the session being lost. 
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + sess, err := s.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. + if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Since(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. 
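// ---------------------------------------------------------------------------
// Editor's aside: an illustrative sketch (not part of the vendored source)
// that bounds concurrent workers with SemaphorePrefix/Acquire/Release as
// documented above. The prefix "service/myapp/workers" and the limit of 3 are
// placeholders, and the returned channel must be watched because the slot can
// be lost whenever the underlying session is invalidated.
// ---------------------------------------------------------------------------

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// All contenders must agree on both the prefix and the limit.
	sem, err := client.SemaphorePrefix("service/myapp/workers", 3)
	if err != nil {
		log.Fatal(err)
	}

	// Acquire blocks until a slot is free or stopCh is closed.
	lostCh, err := sem.Acquire(nil)
	if err != nil {
		log.Fatal(err)
	}

	select {
	case <-lostCh:
		log.Println("slot lost; stop the guarded work")
	default:
		// ... do the rate-limited work here ...
		if err := sem.Release(); err != nil {
			log.Fatal(err)
		}
	}
}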
+func (s *Semaphore) Release() error { + // Hold the lock as we try to release + s.l.Lock() + defer s.l.Unlock() + + // Ensure the lock is actually held + if !s.isHeld { + return ErrSemaphoreNotHeld + } + + // Set that we no longer own the lock + s.isHeld = false + + // Stop the session renew + if s.sessionRenew != nil { + defer func() { + close(s.sessionRenew) + s.sessionRenew = nil + }() + } + + // Get and clear the lock session + lockSession := s.lockSession + s.lockSession = "" + + // Remove ourselves as a lock holder + kv := s.c.KV() + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) +READ: + pair, _, err := kv.Get(key, nil) + if err != nil { + return err + } + if pair == nil { + pair = &KVPair{} + } + lock, err := s.decodeLock(pair) + if err != nil { + return err + } + + // Create a new lock without us as a holder + if _, ok := lock.Holders[lockSession]; ok { + delete(lock.Holders, lockSession) + newLock, err := s.encodeLock(lock, pair.ModifyIndex) + if err != nil { + return err + } + + // Swap the locks + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + goto READ + } + } + + // Destroy the contender entry + contenderKey := path.Join(s.opts.Prefix, lockSession) + if _, err := kv.Delete(contenderKey, nil); err != nil { + return err + } + return nil +} + +// Destroy is used to cleanup the semaphore entry. It is not necessary +// to invoke. It will fail if the semaphore is in use. +func (s *Semaphore) Destroy() error { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return ErrSemaphoreHeld + } + + // List for the semaphore + kv := s.c.KV() + pairs, _, err := kv.List(s.opts.Prefix, nil) + if err != nil { + return fmt.Errorf("failed to read prefix: %v", err) + } + + // Find the lock pair, bail if it doesn't exist + lockPair := s.findLock(pairs) + if lockPair.ModifyIndex == 0 { + return nil + } + if lockPair.Flags != SemaphoreFlagValue { + return ErrSemaphoreConflict + } + + // Decode the lock + lock, err := s.decodeLock(lockPair) + if err != nil { + return err + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if there are any holders + if len(lock.Holders) > 0 { + return ErrSemaphoreInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(lockPair, nil) + if err != nil { + return fmt.Errorf("failed to remove semaphore: %v", err) + } + if !didRemove { + return ErrSemaphoreInUse + } + return nil +} + +// createSession is used to create a new managed session +func (s *Semaphore) createSession() (string, error) { + session := s.c.Session() + se := &SessionEntry{ + Name: s.opts.SessionName, + TTL: s.opts.SessionTTL, + Behavior: SessionBehaviorDelete, + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// contenderEntry returns a formatted KVPair for the contender +func (s *Semaphore) contenderEntry(session string) *KVPair { + return &KVPair{ + Key: path.Join(s.opts.Prefix, session), + Value: s.opts.Value, + Session: session, + Flags: SemaphoreFlagValue, + } +} + +// findLock is used to find the KV Pair which is used for coordination +func (s *Semaphore) findLock(pairs KVPairs) *KVPair { + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) + for _, pair := range pairs { + if pair.Key == key { + return pair + } + } + return &KVPair{Flags: SemaphoreFlagValue} +} + +// decodeLock is used to decode a semaphoreLock from an 
+// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long running routine to monitor a semaphore ownership +// It closes the stopCh if we lose our slot. +func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := s.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := s.opts.MonitorRetries +RETRY: + pairs, meta, err := kv.List(s.opts.Prefix, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + lockPair := s.findLock(pairs) + lock, err := s.decodeLock(lockPair) + if err != nil { + return + } + s.pruneDeadHolders(lock, pairs) + if _, ok := lock.Holders[session]; ok { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go new file mode 100644 index 0000000..1613f11 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,224 @@ +package api + +import ( + "errors" + "fmt" + "time" +) + +const ( + // SessionBehaviorRelease is the default behavior and causes + // all associated locks to be released on session invalidation. + SessionBehaviorRelease = "release" + + // SessionBehaviorDelete is new in Consul 0.5 and changes the + // behavior to delete all associated locks on session invalidation. + // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. 
+ SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. +func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// RenewPeriodic is used to periodically invoke Session.Renew on a +// session until a doneCh is closed. This is meant to be used in a long running +// goroutine to ensure a session stays valid. 
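
A minimal sketch of the session lifecycle these endpoints implement: create a TTL session, keep it alive with RenewPeriodic (whose body follows below), and tear it down by closing doneCh. Passing nil WriteOptions mirrors how the semaphore code above invokes RenewPeriodic.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	session := client.Session()

	// Delete-on-invalidation sessions behave like ephemeral nodes.
	id, _, err := session.Create(&api.SessionEntry{
		Name:     "example-worker",
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// RenewPeriodic renews at roughly TTL/2 until doneCh is closed,
	// then destroys the session on the way out.
	doneCh := make(chan struct{})
	go func() {
		if err := session.RenewPeriodic("15s", id, nil, doneCh); err != nil {
			log.Printf("session renewal stopped: %v", err)
		}
	}()

	// ... do work that is tied to the session ...

	close(doneCh) // stop renewing and destroy the session
}
```
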
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error { + ctx := q.Context() + + ttl, err := time.ParseDuration(initialTTL) + if err != nil { + return err + } + + waitDur := ttl / 2 + lastRenewTime := time.Now() + var lastErr error + for { + if time.Since(lastRenewTime) > ttl { + return lastErr + } + select { + case <-time.After(waitDur): + entry, _, err := s.Renew(id, q) + if err != nil { + waitDur = time.Second + lastErr = err + continue + } + if entry == nil { + return ErrSessionExpired + } + + // Handle the server updating the TTL + ttl, _ = time.ParseDuration(entry.TTL) + waitDur = ttl / 2 + lastRenewTime = time.Now() + + case <-doneCh: + // Attempt a session destroy + s.Destroy(id, q) + return nil + + case <-ctx.Done(): + // Bail immediately since attempting the destroy would + // use the canceled context in q, which would just bail. + return ctx.Err() + } + } +} + +// Info looks up a single session +func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/info/"+id, &entries, q) + if err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List gets sessions for a node +func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/node/"+node, &entries, q) + if err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// List gets all active sessions +func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/list", &entries, q) + if err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go new file mode 100644 index 0000000..e902377 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/snapshot.go @@ -0,0 +1,47 @@ +package api + +import ( + "io" +) + +// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of +// Consul's internal state and restore snapshots for disaster recovery. +type Snapshot struct { + c *Client +} + +// Snapshot returns a handle that exposes the snapshot endpoints. +func (c *Client) Snapshot() *Snapshot { + return &Snapshot{c} +} + +// Save requests a new snapshot and provides an io.ReadCloser with the snapshot +// data to save. If this doesn't return an error, then it's the responsibility +// of the caller to close it. Only a subset of the QueryOptions are supported: +// Datacenter, AllowStale, and Token. +func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/snapshot") + r.setQueryOptions(q) + + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + return resp.Body, qm, nil +} + +// Restore streams in an existing snapshot and attempts to restore it. 
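
Save above and Restore below form a simple backup/restore round trip. A sketch of wiring them together, with nil options and abbreviated error handling:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	snap := client.Snapshot()

	// Take a snapshot and stream it to disk. The caller owns (and must
	// close) the returned ReadCloser.
	rc, _, err := snap.Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("backup.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}

	// Later, stream the snapshot back in to restore state.
	in, err := os.Open("backup.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()
	if err := snap.Restore(nil, in); err != nil {
		log.Fatal(err)
	}
}
```
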
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { + r := s.c.newRequest("PUT", "/v1/snapshot") + r.body = in + r.setWriteOptions(q) + _, _, err := requireOK(s.c.doRequest(r)) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go new file mode 100644 index 0000000..74ef61a --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/status.go @@ -0,0 +1,43 @@ +package api + +// Status can be used to query the Status endpoints +type Status struct { + c *Client +} + +// Status returns a handle to the status endpoints +func (c *Client) Status() *Status { + return &Status{c} +} + +// Leader is used to query for a known leader +func (s *Status) Leader() (string, error) { + r := s.c.newRequest("GET", "/v1/status/leader") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var leader string + if err := decodeBody(resp, &leader); err != nil { + return "", err + } + return leader, nil +} + +// Peers is used to query for a known raft peers +func (s *Status) Peers() ([]string, error) { + r := s.c.newRequest("GET", "/v1/status/peers") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var peers []string + if err := decodeBody(resp, &peers); err != nil { + return nil, err + } + return peers, nil +} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 0000000..444df08 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. This would return nil if that specific error doesn't + // exist. + perr := errwrap.GetType(err, new(os.PathError)) +} +``` + +#### Custom Types + +If you're already making custom types that properly wrap errors, then +you can get all the functionality of `errwraps.Contains` and such by +implementing the `Wrapper` interface with just one function. Example: + +```go +type AppError { + Code ErrorCode + Err error +} + +func (e *AppError) WrappedErrors() []error { + return []error{e.Err} +} +``` + +Now this works: + +```go +err := &AppError{Err: fmt.Errorf("an error")} +if errwrap.ContainsType(err, fmt.Errorf("")) { + // This will work! +} +``` diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go new file mode 100644 index 0000000..a733bef --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -0,0 +1,169 @@ +// Package errwrap implements methods to formalize error wrapping in Go. +// +// All of the top-level functions that take an `error` are built to be able +// to take any error, not just wrapped errors. 
This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. 
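
A short sketch of Walk, whose documentation continues below: the callback sees both the wrapping error created by Wrapf and the original error it wraps.

```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/errwrap"
)

func main() {
	_, err := os.Open("/i/dont/exist")
	wrapped := errwrap.Wrapf("open failed: {{err}}", err)

	// Prints the outer formatted error and then the wrapped os.Open error.
	errwrap.Walk(wrapped, func(e error) {
		fmt.Printf("saw %T: %v\n", e, e)
	})
}
```
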
If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 0000000..c9b8402 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 0000000..e87a115 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 0000000..036e531 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. 
Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 0000000..8d306bf --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,57 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 0000000..0584109 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. 
This can have detrimental effects, esepcially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues. This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors. If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. +// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod new file mode 100644 index 0000000..310f075 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 0000000..7eda377 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,43 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. +func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + next.ServeHTTP(w, r) + return + }) +} diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore new file mode 100644 index 0000000..42cc410 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/.gitignore @@ -0,0 +1 @@ +.idea* \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE new file mode 100644 index 0000000..abaf1e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md new file mode 100644 index 0000000..9b6845e --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -0,0 +1,148 @@ +# go-hclog + +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: https://godoc.org/github.com/hashicorp/go-hclog + +`go-hclog` is a package for Go that provides a simple key/value logging +interface for use in development and production environments. + +It provides logging levels that provide decreased output based upon the +desired amount of output, unlike the standard library `log` package. + +It provides `Printf` style logging of values via `hclog.Fmt()`. + +It provides a human readable output mode for use in development as well as +JSON output mode for production. + +## Stability Note + +While this library is fully open source and HashiCorp will be maintaining it +(since we are and will be making extensive use of it), the API and output +format is subject to minor changes as we fully bake and vet it in our projects. +This notice will be removed once it's fully integrated into our major projects +and no further changes are anticipated. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-hclog`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-hclog + +## Usage + +### Use the global logger + +```go +hclog.Default().Info("hello world") +``` + +```text +2017-07-05T16:15:55.167-0700 [INFO ] hello world +``` + +(Note timestamps are removed in future examples for brevity.) + +### Create a new logger + +```go +appLogger := hclog.New(&hclog.LoggerOptions{ + Name: "my-app", + Level: hclog.LevelFromString("DEBUG"), +}) +``` + +### Emit an Info level message with 2 key/value pairs + +```go +input := "5.5" +_, err := strconv.ParseInt(input, 10, 32) +if err != nil { + appLogger.Info("Invalid input for ParseInt", "input", input, "error", err) +} +``` + +```text +... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax" +``` + +### Create a new Logger for a major subsystem + +```go +subsystemLogger := appLogger.Named("transport") +subsystemLogger.Info("we are transporting something") +``` + +```text +... [INFO ] my-app.transport: we are transporting something +``` + +Notice that logs emitted by `subsystemLogger` contain `my-app.transport`, +reflecting both the application and subsystem names. + +### Create a new Logger with fixed key/value pairs + +Using `With()` will include a specific key-value pair in all messages emitted +by that logger. + +```go +requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363" +requestLogger := subsystemLogger.With("request", requestID) +requestLogger.Info("we are transporting a request") +``` + +```text +... 
[INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363 +``` + +This allows sub Loggers to be context specific without having to thread that +into all the callers. + +### Using `hclog.Fmt()` + +```go +var int totalBandwidth = 200 +appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) +``` + +```text +... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s" +``` + +### Use this with code that uses the standard library logger + +If you want to use the standard library's `log.Logger` interface you can wrap +`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use +it with the familiar `Println()`, `Printf()`, etc. For example: + +```go +stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, +}) +// Printf() is provided by stdlib log.Logger interface, not hclog.Logger +stdLogger.Printf("[DEBUG] %+v", stdLogger) +``` + +```text +... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} +``` + +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.Writer(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... [DEBUG] my-app: 42 +``` + +Notice that if `appLogger` is initialized with the `INFO` log level _and_ you +specify `InferLevels: true`, you will not see any output here. You must change +`appLogger` to `DEBUG` to see output. See the docs for more information. diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go new file mode 100644 index 0000000..e5f7f95 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -0,0 +1,34 @@ +package hclog + +import ( + "sync" +) + +var ( + protect sync.Once + def Logger + + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. + DefaultOptions = &LoggerOptions{ + Level: DefaultLevel, + Output: DefaultOutput, + } +) + +// Default returns a globally held logger. This can be a good starting +// place, and then you can use .With() and .Name() to create sub-loggers +// to be used in more specific contexts. +func Default() Logger { + protect.Do(func() { + def = New(DefaultOptions) + }) + + return def +} + +// L is a short alias for Default(). 
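+//
+// For example, a call site can write L().Warn("disk space low") to emit
+// through the shared default logger without threading a Logger value
+// around; the message and level here are illustrative only.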
+func L() Logger { + return Default() +} diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod new file mode 100644 index 0000000..0d079a6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -0,0 +1,7 @@ +module github.com/hashicorp/go-hclog + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum new file mode 100644 index 0000000..e03ee77 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -0,0 +1,6 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 0000000..d32630c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,511 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "log" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// TimeFormat to use for logging. This is a version of RFC3339 that contains +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } +) + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. +type intLogger struct { + json bool + caller bool + name string + timeFormat string + + // This is a pointer so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + mutex *sync.Mutex + writer *writer + level *int32 + + implied []interface{} +} + +// New returns a configured logger. +func New(opts *LoggerOptions) Logger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = DefaultOutput + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mutex := opts.Mutex + if mutex == nil { + mutex = new(sync.Mutex) + } + + l := &intLogger{ + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + mutex: mutex, + writer: newWriter(output), + level: new(int32), + } + + if opts.TimeFormat != "" { + l.timeFormat = opts.TimeFormat + } + + atomic.StoreInt32(l.level, int32(level)) + + return l +} + +// Log a message and a set of key/value pairs if the given level is at +// or more severe that the threshold configured in the Logger. +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { + return + } + + t := time.Now() + + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.json { + l.logJSON(t, level, msg, args...) + } else { + l.log(t, level, msg, args...) 
+ } + + l.writer.Flush(level) +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + + // Find the last separator. + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// Non-JSON logging format function +func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + l.writer.WriteString(s) + } else { + l.writer.WriteString("[?????]") + } + + if l.caller { + if _, file, line, ok := runtime.Caller(3); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if l.name != "" { + l.writer.WriteString(l.name) + l.writer.WriteString(": ") + } + + l.writer.WriteString(msg) + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + args = append(args, "") + } + } + + l.writer.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) 
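+			// Anything not matched above (structs, maps, errors, and so on) falls
+			// through to the reflection-based default below: slices are rendered
+			// element by element, everything else with fmt's %v verb.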
+ default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + l.writer.WriteByte(' ') + l.writer.WriteString(args[i].(string)) + l.writer.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') + } else { + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + } +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interface{}) { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if l.name != "" { + vals["@module"] = l.name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(3); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + args = append(args, "") + } + } + + for i := 0; i < len(args); i = i + 2 { + if _, ok := args[i].(string); !ok { + // As this is the logging function not much we can do here + // without injecting into logs... + continue + } + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + vals[args[i].(string)] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + panic(err) + } +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.Log(Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.Log(Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.Log(Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.Log(Warn, msg, args...) 
+} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.Log(Error, msg, args...) +} + +// Indicate that the logger would emit TRACE level logs +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (l *intLogger) With(args ...interface{}) Logger { + if len(args)%2 != 0 { + panic("With() call requires paired arguments") + } + + sl := *l + + result := make(map[string]interface{}, len(l.implied)+len(args)) + keys := make([]string, 0, len(l.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(l.implied); i += 2 { + key := l.implied[i].(string) + keys = append(keys, key) + result[key] = l.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + sl.implied = make([]interface{}, 0, len(l.implied)+len(args)) + for _, k := range keys { + sl.implied = append(sl.implied, k) + sl.implied = append(sl.implied, result[k]) + } + + return &sl +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +func (l *intLogger) Named(name string) Logger { + sl := *l + + if sl.name != "" { + sl.name = sl.name + "." + name + } else { + sl.name = name + } + + return &sl +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (l *intLogger) ResetNamed(name string) Logger { + sl := *l + + sl.name = name + + return &sl +} + +// Update the logging level on-the-fly. This will affect all subloggers as +// well. +func (l *intLogger) SetLevel(level Level) { + atomic.StoreInt32(l.level, int32(level)) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. 
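+//
+// A minimal sketch of the intended wiring (variable names are illustrative):
+//
+//	std := logger.StandardLogger(&StandardLoggerOptions{InferLevels: true})
+//	std.Println("[WARN] disk space low") // re-emitted through hclog at WARN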
+func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriter(opts), "", 0) +} + +func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{l, opts.InferLevels} +} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go new file mode 100644 index 0000000..0f1f826 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -0,0 +1,170 @@ +package hclog + +import ( + "io" + "log" + "os" + "strings" + "sync" +) + +var ( + //DefaultOutput is used as the default log output. + DefaultOutput io.Writer = os.Stderr + + // DefaultLevel is used as the default log level. + DefaultLevel = Info +) + +// Level represents a log level. +type Level int32 + +const ( + // NoLevel is a special level used to indicate that no level has been + // set and allow for a default to be used. + NoLevel Level = 0 + + // Trace is the most verbose level. Intended to be used for the tracing + // of actions in code, such as function enters/exits, etc. + Trace Level = 1 + + // Debug information for programmer lowlevel analysis. + Debug Level = 2 + + // Info information about steady state operations. + Info Level = 3 + + // Warn information about rare but handled events. + Warn Level = 4 + + // Error information about unrecoverable events. + Error Level = 5 +) + +// Format is a simple convience type for when formatting is required. When +// processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values +// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). +type Format []interface{} + +// Fmt returns a Format type. This is a convience function for creating a Format +// type. +func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) +} + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case. Accept both "INFO" and "info". + levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + default: + return NoLevel + } +} + +// Logger describes the interface that must be implemeted by all loggers. +type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. This and the other Is* guards + // are used to elide expensive logging code based on the current level. + IsTrace() bool + + // Indicate if DEBUG logs would be emitted. 
This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. This and the other Is* guards + IsInfo() bool + + // Indicate if WARN logs would be emitted. This and the other Is* guards + IsWarn() bool + + // Indicate if ERROR logs would be emitted. This and the other Is* guards + IsError() bool + + // Creates a sublogger that will always have the given key/value pairs + With(args ...interface{}) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all it's own logs + // without losing context. + Named(name string) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honor + // the current name as well. + ResetNamed(name string) Logger + + // Updates the level. This should affect all sub-loggers as well. If an + // implementation cannot update the level on the fly, it should no-op. + SetLevel(level Level) + + // Return a value that conforms to the stdlib log.Logger interface + StandardLogger(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriter(opts *StandardLoggerOptions) io.Writer +} + +// StandardLoggerOptions can be used to configure a new standard logger. +type StandardLoggerOptions struct { + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them. + // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], + // [DEBUG] and strip it off before reapplying it. + InferLevels bool +} + +// LoggerOptions can be used to configure a new logger. +type LoggerOptions struct { + // Name of the subsystem to prefix logs with + Name string + + // The threshold for the logger. Anything less severe is supressed + Level Level + + // Where to write the logs to. Defaults to os.Stderr if nil + Output io.Writer + + // An optional mutex pointer in case Output is shared + Mutex *sync.Mutex + + // Control if the output should be in JSON. + JSONFormat bool + + // Include file and line information in each log line + IncludeLocation bool + + // The time format to use instead of the default + TimeFormat string +} diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go new file mode 100644 index 0000000..7ad6b35 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -0,0 +1,52 @@ +package hclog + +import ( + "io" + "io/ioutil" + "log" +) + +// NewNullLogger instantiates a Logger for which all calls +// will succeed without doing anything. +// Useful for testing purposes. 
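+//
+// For example, a test can hand it to code that requires a Logger
+// (the constructor below is hypothetical):
+//
+//	srv := newServer(NewNullLogger())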
+func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard +} diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file mode 100644 index 0000000..9b27bd3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. +type CapturedStacktrace string + +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. 
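+//
+// A usage sketch (the logger value is assumed to exist):
+//
+//	logger.Error("request failed", "stacktrace", Stacktrace())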
+func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 0000000..913d523 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,62 @@ +package hclog + +import ( + "bytes" + "strings" +) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + log Logger + inferLevels bool +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger. +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.inferLevels { + level, str := s.pickLevel(str) + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } + } else { + s.log.Info(str) + } + + return len(data), nil +} + +// Detect, based on conventions, what log level this is. 
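+// For example, "[ERROR] connection refused" is returned as (Error,
+// "connection refused"), while a line with no recognized prefix falls
+// through to Info with the text left untouched.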
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 0000000..7e8ec72 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,74 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer +} + +func newWriter(w io.Writer) *writer { + return &writer{w: w} +} + +func (w *writer) Flush(level Level) (err error) { + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, w.b.Bytes()) + } else { + _, err = w.w.Write(w.b.Bytes()) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. 
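+//
+// A sketch of how the overrides come into play (the writers are illustrative):
+//
+//	lw := NewLeveledWriter(os.Stdout, map[Level]io.Writer{Error: os.Stderr})
+//	lw.LevelWrite(Error, []byte("routed to stderr"))
+//	lw.LevelWrite(Info, []byte("routed to stdout"))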
+func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml new file mode 100644 index 0000000..1a0bbea --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 0000000..e87a115 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 0000000..8910fcc --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,41 @@ +go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. 
In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go new file mode 100644 index 0000000..a636747 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.mod b/vendor/github.com/hashicorp/go-immutable-radix/go.mod new file mode 100644 index 0000000..27e7b7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/go.mod @@ -0,0 +1,6 @@ +module github.com/hashicorp/go-immutable-radix + +require ( + github.com/hashicorp/go-uuid v1.0.0 + github.com/hashicorp/golang-lru v0.5.0 +) diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.sum b/vendor/github.com/hashicorp/go-immutable-radix/go.sum new file mode 100644 index 0000000..7de5dfc --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go new file mode 100644 index 0000000..e5e6e57 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -0,0 +1,662 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. 
The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. 
+ t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
+ n.prefix = concat(n.prefix, child.prefix) + n.leaf = child.leaf + if len(child.edges) != 0 { + n.edges = make([]edge, len(child.edges)) + copy(n.edges, child.edges) + } else { + n.edges = nil + } +} + +// insert does a recursive insertion +func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) { + // Handle key exhaustion + if len(search) == 0 { + var oldVal interface{} + didUpdate := false + if n.isLeaf() { + oldVal = n.leaf.val + didUpdate = true + } + + nc := t.writeNode(n, true) + nc.leaf = &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + return nc, oldVal, didUpdate + } + + // Look for the edge + idx, child := n.getEdge(search[0]) + + // No edge, create one + if child == nil { + e := edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + }, + prefix: search, + }, + } + nc := t.writeNode(n, false) + nc.addEdge(e) + return nc, nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, child.prefix) + if commonPrefix == len(child.prefix) { + search = search[commonPrefix:] + newChild, oldVal, didUpdate := t.insert(child, k, search, v) + if newChild != nil { + nc := t.writeNode(n, false) + nc.edges[idx].node = newChild + return nc, oldVal, didUpdate + } + return nil, oldVal, didUpdate + } + + // Split the node + nc := t.writeNode(n, false) + splitNode := &Node{ + mutateCh: make(chan struct{}), + prefix: search[:commonPrefix], + } + nc.replaceEdge(edge{ + label: search[0], + node: splitNode, + }) + + // Restore the existing child node + modChild := t.writeNode(child, false) + splitNode.addEdge(edge{ + label: modChild.prefix[commonPrefix], + node: modChild, + }) + modChild.prefix = modChild.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + splitNode.leaf = leaf + return nc, nil, false + } + + // Create a new edge for the node + splitNode.addEdge(edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: leaf, + prefix: search, + }, + }) + return nc, nil, false +} + +// delete does a recursive deletion +func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + return nil, nil + } + // Copy the pointer in case we are in a transaction that already + // modified this node since the node will be reused. Any changes + // made to the node will not affect returning the original leaf + // value. + oldLeaf := n.leaf + + // Remove the leaf node + nc := t.writeNode(n, true) + nc.leaf = nil + + // Check if this node should be merged + if n != t.root && len(nc.edges) == 1 { + t.mergeChild(nc) + } + return nc, oldLeaf + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + if child == nil || !bytes.HasPrefix(search, child.prefix) { + return nil, nil + } + + // Consume the search prefix + search = search[len(child.prefix):] + newChild, leaf := t.delete(n, child, search) + if newChild == nil { + return nil, nil + } + + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. 
This is pretty subtle, + // so be careful if you change any of the logic here. + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, leaf +} + +// delete does a recursive deletion +func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) { + // Check for key exhaustion + if len(search) == 0 { + nc := t.writeNode(n, true) + if n.isLeaf() { + nc.leaf = nil + } + nc.edges = nil + return nc, t.trackChannelsAndCount(n) + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix + // Need to do both so that we can delete prefixes that don't correspond to any node in the tree + if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { + return nil, 0 + } + + // Consume the search prefix + if len(child.prefix) > len(search) { + search = []byte("") + } else { + search = search[len(child.prefix):] + } + newChild, numDeletions := t.deletePrefix(n, child, search) + if newChild == nil { + return nil, 0 + } + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, numDeletions +} + +// Insert is used to add or update a given key. The return provides +// the previous value and a bool indicating if any was set. +func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { + newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) + if newRoot != nil { + t.root = newRoot + } + if !didUpdate { + t.size++ + } + return oldVal, didUpdate +} + +// Delete is used to delete a given key. Returns the old value if any, +// and a bool indicating if the key was set. +func (t *Txn) Delete(k []byte) (interface{}, bool) { + newRoot, leaf := t.delete(nil, t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return nil, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. 
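+//
+// A minimal sketch of reading within a transaction (illustrative only, using
+// the API defined in this file): writes made earlier in the same transaction
+// are visible through the transaction root before Commit publishes a new Tree.
+//
+//	txn := New().Txn()
+//	txn.Insert([]byte("k"), 1)
+//	if _, ok := txn.Root().Get([]byte("k")); !ok {
+//		panic("key should be visible inside the transaction")
+//	}
+//	tree := txn.Commit() // immutable snapshot that includes "k"
+//	_ = tree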
+func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
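+//
+// A minimal sketch of the copy-on-write behaviour: the original tree is left
+// untouched and stays safe to read, while the returned tree contains the new
+// key.
+//
+//	t0 := New()
+//	t1, _, _ := t0.Insert([]byte("x"), 1)
+//	_, ok0 := t0.Get([]byte("x")) // false: t0 is unchanged
+//	_, ok1 := t1.Get([]byte("x")) // true: t1 holds the key
+//	_, _ = ok0, ok1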
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. +func (t *Tree) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go new file mode 100644 index 0000000..9815e02 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -0,0 +1,91 @@ +package iradix + +import "bytes" + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator struct { + node *Node + stack []edges +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaution + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +// Next returns the next node in order +func (i *Iterator) Next() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges{ + edges{ + edge{node: i.node}, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return 
nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go new file mode 100644 index 0000000..7a065e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -0,0 +1,292 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
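+//
+// A minimal sketch: with "foo" and "foobar" stored, a lookup of "foozip"
+// reports "foo" as the longest stored key that prefixes the query.
+//
+//	t, _, _ := New().Insert([]byte("foo"), 1)
+//	t, _, _ = t.Insert([]byte("foobar"), 2)
+//	k, _, ok := t.Root().LongestPrefix([]byte("foozip"))
+//	// ok == true, string(k) == "foo"
+//	_, _ = k, ok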
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) { + var last *leafNode + search := k + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaution + if len(search) == 0 { + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return nil, nil, false +} + +// Minimum is used to return the minimum value in the tree +func (n *Node) Minimum() ([]byte, interface{}, bool) { + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return nil, nil, false +} + +// Maximum is used to return the maximum value in the tree +func (n *Node) Maximum() ([]byte, interface{}, bool) { + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } else { + break + } + } + return nil, nil, false +} + +// Iterator is used to return an iterator at +// the given node to walk the tree +func (n *Node) Iterator() *Iterator { + return &Iterator{node: n} +} + +// rawIterator is used to return a raw iterator at the given node to walk the +// tree. +func (n *Node) rawIterator() *rawIterator { + iter := &rawIterator{node: n} + iter.Next() + return iter +} + +// Walk is used to walk the tree +func (n *Node) Walk(fn WalkFn) { + recursiveWalk(n, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) { + search := prefix + for { + // Check for key exhaution + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (n *Node) WalkPath(path []byte, fn WalkFn) { + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaution + if len(search) == 0 { + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. 
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 0000000..04814c1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + rawStackEntry{ + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/go-msgpack/LICENSE b/vendor/github.com/hashicorp/go-msgpack/LICENSE new file mode 100644 index 0000000..ccae99f --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012, 2013 Ugorji Nwoke. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the author nor the names of its contributors may be used + to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go new file mode 100644 index 0000000..c14d810 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go @@ -0,0 +1,143 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . + +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. 
+ - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Representative Benchmark Results + +Run the benchmark suite using: + go test -bi -bench=. -benchmem + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext_dep_test.go + +*/ +package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/README.md b/vendor/github.com/hashicorp/go-msgpack/codec/README.md new file mode 100644 index 0000000..6c95d1b --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/README.md @@ -0,0 +1,174 @@ +# Codec + +High Performance and Feature-Rich Idiomatic Go Library providing +encode/decode support for different serialization formats. 
+ +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +Online documentation: [http://godoc.org/github.com/ugorji/go/codec] + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +## Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +## Representative Benchmark Results + +A sample run of benchmark using "go test -bi -bench=. -benchmem": + + /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) + + .............................................. + BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT + To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." + Benchmark: + Struct recursive Depth: 1 + ApproxDeepSize Of benchmark Struct: 4694 bytes + Benchmark One-Pass Run: + v-msgpack: len: 1600 bytes + bson: len: 3025 bytes + msgpack: len: 1560 bytes + binc: len: 1187 bytes + gob: len: 1972 bytes + json: len: 2538 bytes + .............................................. + PASS + Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op + Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op + Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op + Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op + Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op + Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op + Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op + Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op + Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op + Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op + Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op + Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op + Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op + Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op + ok ugorji.net/codec 30.827s + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext\_dep\_test.go + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go new file mode 100644 index 0000000..2bb5e8f --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go @@ -0,0 +1,786 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "math" + // "reflect" + // "sync/atomic" + "time" + //"fmt" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
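+
+// Orientation sketch for the descriptor bytes used throughout this file: each
+// descriptor packs the value descriptor (vd) in its high nibble and a
+// sub-descriptor or small length (vs) in its low nibble, mirroring what the
+// decoder does with d.bd >> 4 and d.bd & 0x0f. For example, a 3-byte string
+// is written with its length folded into the low nibble (length + 4):
+//
+//	bd := bincVdString<<4 | 0x7 // string of length 0x7 - 4 = 3
+//	vd, vs := bd>>4, bd&0x0f    // vd == bincVdString, vs == 0x7
+//	_, _ = vd, vs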
+ +//var _ = fmt.Printf + +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + w encWriter + m map[string]uint16 // symbols + s uint32 // symbols sequencer + b [8]byte +} + +func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + bs := encodeTime(v.(time.Time)) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) encodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) encodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) encodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) encodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + switch { + case v >= 0: + e.encUint(bincVdPosInt<<4, true, uint64(v)) + case v == -1: + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + default: + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) encodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + switch { + case v == 0: + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + case pos && v >= 1 && v <= 16: + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + case v <= math.MaxUint8: + e.w.writen2(bd|0x0, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.encIntegerPrune(bd, pos, v, 4) + default: + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) encodeArrayPreamble(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeMapPreamble(length int) { + 
e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). + + l := len(v) + switch l { + case 0: + e.encBytesLen(c_UTF8, 0) + return + case 1: + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + e.w.writeUint16(ui) + } + } else { + e.s++ + ui = uint16(e.s) + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + switch { + case l <= math.MaxUint8: + // lenprec = 0 + case l <= math.MaxUint16: + lenprec = 1 + case int64(l) <= math.MaxUint32: + lenprec = 2 + default: + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + e.w.writeUint16(ui) + } + switch lenprec { + case 0: + e.w.writen1(byte(l)) + case 1: + e.w.writeUint16(uint16(l)) + case 2: + e.w.writeUint32(uint32(l)) + default: + e.w.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd | 0x02) + e.w.writeUint32(uint32(v)) + default: + e.w.writen1(bd | 0x03) + e.w.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecDriver struct { + r decReader + bdRead bool + bdType valueType + bd byte + vd byte + vs byte + b [8]byte + m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) +} + +func (d *bincDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *bincDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + d.bdType = valueTypeNil + case bincSpFalse, bincSpTrue: + d.bdType = valueTypeBool + case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: + d.bdType = valueTypeFloat + case bincSpZero: + d.bdType = valueTypeUint + case bincSpNegOne: + d.bdType = valueTypeInt + default: + decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + d.bdType = valueTypeUint + case bincVdPosInt: + d.bdType = valueTypeUint + case bincVdNegInt: + d.bdType = 
valueTypeInt + case bincVdFloat: + d.bdType = valueTypeFloat + case bincVdString: + d.bdType = valueTypeString + case bincVdSymbol: + d.bdType = valueTypeSymbol + case bincVdByteArray: + d.bdType = valueTypeBytes + case bincVdTimestamp: + d.bdType = valueTypeTimestamp + case bincVdCustomExt: + d.bdType = valueTypeExt + case bincVdArray: + d.bdType = valueTypeArray + case bincVdMap: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) + } + } + return d.bdType +} + +func (d *bincDecDriver) tryDecodeAsNil() bool { + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + if d.vd != bincVdTimestamp { + decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + } + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + decErr("At most 8 bytes used to represent float. Received: %v bytes", l) + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(d.r.readUint64()); break; } + switch vs := d.vs; vs & 0x7 { + case bincFlBin32: + d.decFloatPre(vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + case bincFlBin64: + d.decFloatPre(vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + default: + decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:]) + v = uint64(bigen.Uint16(d.b[6:])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 3: + d.r.readb(d.b[4:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:])) + case 7: + d.r.readb(d.b[:]) + v = uint64(bigen.Uint64(d.b[:])) + default: + decErr("unsigned integers with greater than 64 bits of precision not supported") + } + return +} + +func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.vd { + case bincVdPosInt: + ui = d.decUint() + i = int64(ui) + case bincVdNegInt: + ui = d.decUint() + i = -(int64(ui)) + neg = true + case bincVdSmallInt: + i = int64(d.vs) + 1 + ui = uint64(d.vs) + 1 + case bincVdSpecial: + switch d.vs { + case bincSpZero: + //i = 0 + case bincSpNegOne: + neg = true + ui = 1 + i = -1 + default: + decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) + } + default: + decErr("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + } + return +} + +func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.vd { + case bincVdSpecial: + d.bdRead = false + switch d.vs { + case bincSpNan: + return math.NaN() + case bincSpPosInf: + return math.Inf(1) + case bincSpZeroFloat, bincSpZero: + return + case bincSpNegInf: + return math.Inf(-1) + default: + decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + } + case bincVdFloat: + f = d.decFloat() + default: + _, i, _ := d.decIntAny() + f = float64(i) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) decodeBool() (b bool) { + switch d.bd { + case (bincVdSpecial | bincSpFalse): + // b = false + case (bincVdSpecial | bincSpTrue): + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) readMapLen() (length int) { + if d.vd != bincVdMap { + decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) readArrayLen() (length int) { + if d.vd != bincVdArray { + decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs <= 3 { + return int(d.decUint()) + } + return int(d.vs - 4) +} + +func (d *bincDecDriver) decodeString() (s string) { + switch d.vd { + case bincVdString, bincVdByteArray: + if length := d.decLen(); length > 0 { + s = string(d.r.readn(length)) + } + case bincVdSymbol: + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint32 + vs := d.vs + //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) + if vs&0x8 == 0 { + symbol = uint32(d.r.readn1()) + } else { + symbol = uint32(d.r.readUint16()) + } + if d.m == nil { + d.m = make(map[uint32]string, 16) + } + + if vs&0x4 == 0 { + s = d.m[symbol] + } else { + var slen int + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(d.r.readUint16()) + case 2: + slen = int(d.r.readUint32()) + case 3: + slen = int(d.r.readUint64()) + } + s = string(d.r.readn(slen)) + d.m[symbol] = s + } + default: + decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + var clen int + switch d.vd { + case bincVdString, bincVdByteArray: + clen = d.decLen() + default: + decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. 
Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.vd { + case bincVdCustomExt: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case bincVdByteArray: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + vt = valueTypeNil + case bincSpFalse: + vt = valueTypeBool + v = false + case bincSpTrue: + vt = valueTypeBool + v = true + case bincSpNan: + vt = valueTypeFloat + v = math.NaN() + case bincSpPosInf: + vt = valueTypeFloat + v = math.Inf(1) + case bincSpNegInf: + vt = valueTypeFloat + v = math.Inf(-1) + case bincSpZeroFloat: + vt = valueTypeFloat + v = float64(0) + case bincSpZero: + vt = valueTypeUint + v = int64(0) // int8(0) + case bincSpNegOne: + vt = valueTypeInt + v = int64(-1) // int8(-1) + default: + decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + vt = valueTypeUint + v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + vt = valueTypeUint + v = d.decUint() + case bincVdNegInt: + vt = valueTypeInt + v = -(int64(d.decUint())) + case bincVdFloat: + vt = valueTypeFloat + v = d.decFloat() + case bincVdSymbol: + vt = valueTypeSymbol + v = d.decodeString() + case bincVdString: + vt = valueTypeString + v = d.decodeString() + case bincVdByteArray: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case bincVdTimestamp: + vt = valueTypeTimestamp + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + v = tt + case bincVdCustomExt: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case bincVdArray: + vt = valueTypeArray + decodeFurther = true + case bincVdMap: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
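+//
+// A minimal round-trip sketch, assuming the NewEncoderBytes/NewDecoderBytes
+// constructors described in the package documentation:
+//
+//	var h BincHandle
+//	var buf []byte
+//	if err := NewEncoderBytes(&buf, &h).Encode(map[string]int{"a": 1}); err != nil {
+//		panic(err)
+//	}
+//	var out map[string]int
+//	if err := NewDecoderBytes(buf, &h).Decode(&out); err != nil {
+//		panic(err)
+//	}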
+type BincHandle struct { + BasicHandle +} + +func (h *BincHandle) newEncDriver(w encWriter) encDriver { + return &bincEncDriver{w: w} +} + +func (h *BincHandle) newDecDriver(r decReader) decDriver { + return &bincDecDriver{r: r} +} + +func (_ *BincHandle) writeExt() bool { + return true +} + +func (h *BincHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go new file mode 100644 index 0000000..87bef2b --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go @@ -0,0 +1,1048 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" + // "runtime/debug" +) + +// Some tagging information for error messages. +const ( + msgTagDec = "codec.decoder" + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + readn(n int) []byte + readb([]byte) + readn1() uint8 + readUint16() uint16 + readUint32() uint32 + readUint64() uint64 +} + +type decDriver interface { + initReadNext() + tryDecodeAsNil() bool + currentEncodedType() valueType + isBuiltinType(rt uintptr) bool + decodeBuiltin(rt uintptr, v interface{}) + //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + decodeNaked() (v interface{}, vt valueType, decodeFurther bool) + decodeInt(bitsize uint8) (i int64) + decodeUint(bitsize uint8) (ui uint64) + decodeFloat(chkOverflow32 bool) (f float64) + decodeBool() (b bool) + // decodeString can also decode symbols + decodeString() (s string) + decodeBytes(bs []byte) (bsOut []byte, changed bool) + decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + readMapLen() int + readArrayLen() int +} + +type DecodeOptions struct { + // An instance of MapType is used during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} + MapType reflect.Type + // An instance of SliceType is used during schema-less decoding of an array in the stream. + // If nil, we use []interface{} + SliceType reflect.Type + // ErrorIfNoField controls whether an error is returned when decoding a map + // from a codec stream into a struct, and no matching struct field is found. 
+ ErrorIfNoField bool +} + +// ------------------------------------ + +// ioDecReader is a decReader that reads off an io.Reader +type ioDecReader struct { + r io.Reader + br io.ByteReader + x [8]byte //temp byte array re-used internally for efficiency +} + +func (z *ioDecReader) readn(n int) (bs []byte) { + if n <= 0 { + return + } + bs = make([]byte, n) + if _, err := io.ReadAtLeast(z.r, bs, n); err != nil { + panic(err) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil { + panic(err) + } +} + +func (z *ioDecReader) readn1() uint8 { + if z.br != nil { + b, err := z.br.ReadByte() + if err != nil { + panic(err) + } + return b + } + z.readb(z.x[:1]) + return z.x[0] +} + +func (z *ioDecReader) readUint16() uint16 { + z.readb(z.x[:2]) + return bigen.Uint16(z.x[:2]) +} + +func (z *ioDecReader) readUint32() uint32 { + z.readb(z.x[:4]) + return bigen.Uint32(z.x[:4]) +} + +func (z *ioDecReader) readUint64() uint64 { + z.readb(z.x[:8]) + return bigen.Uint64(z.x[:8]) +} + +// ------------------------------------ + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available +} + +func (z *bytesDecReader) consume(n int) (oldcursor int) { + if z.a == 0 { + panic(io.EOF) + } + if n > z.a { + decErr("Trying to read %v bytes. Only %v available", n, z.a) + } + // z.checkAvailable(n) + oldcursor = z.c + z.c = oldcursor + n + z.a = z.a - n + return +} + +func (z *bytesDecReader) readn(n int) (bs []byte) { + if n <= 0 { + return + } + c0 := z.consume(n) + bs = z.b[c0:z.c] + return +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readn(len(bs))) +} + +func (z *bytesDecReader) readn1() uint8 { + c0 := z.consume(1) + return z.b[c0] +} + +// Use binaryEncoding helper for 4 and 8 bits, but inline it for 2 bits +// creating temp slice variable and copying it to helper function is expensive +// for just 2 bits. 
+ +func (z *bytesDecReader) readUint16() uint16 { + c0 := z.consume(2) + return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 +} + +func (z *bytesDecReader) readUint32() uint32 { + c0 := z.consume(4) + return bigen.Uint32(z.b[c0:z.c]) +} + +func (z *bytesDecReader) readUint64() uint64 { + c0 := z.consume(8) + return bigen.Uint64(z.b[c0:z.c]) +} + +// ------------------------------------ + +// decFnInfo has methods for registering handling decoding of a specific type +// based on some characteristics (builtin, extension, reflect Kind, etc) +type decFnInfo struct { + ti *typeInfo + d *Decoder + dd decDriver + xfFn func(reflect.Value, []byte) error + xfTag byte + array bool +} + +func (f *decFnInfo) builtin(rv reflect.Value) { + f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +} + +func (f *decFnInfo) rawExt(rv reflect.Value) { + xtag, xbs := f.dd.decodeExt(false, 0) + rv.Field(0).SetUint(uint64(xtag)) + rv.Field(1).SetBytes(xbs) +} + +func (f *decFnInfo) ext(rv reflect.Value) { + _, xbs := f.dd.decodeExt(true, f.xfTag) + if fnerr := f.xfFn(rv, xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryUnmarshaler + if f.ti.unmIndir == -1 { + bm = rv.Addr().Interface().(binaryUnmarshaler) + } else if f.ti.unmIndir == 0 { + bm = rv.Interface().(binaryUnmarshaler) + } else { + for j, k := int8(0), f.ti.unmIndir; j < k; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryUnmarshaler) + } + xbs, _ := f.dd.decodeBytes(nil) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) kErr(rv reflect.Value) { + decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) +} + +func (f *decFnInfo) kString(rv reflect.Value) { + rv.SetString(f.dd.decodeString()) +} + +func (f *decFnInfo) kBool(rv reflect.Value) { + rv.SetBool(f.dd.decodeBool()) +} + +func (f *decFnInfo) kInt(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(intBitsize)) +} + +func (f *decFnInfo) kInt64(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(64)) +} + +func (f *decFnInfo) kInt32(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(32)) +} + +func (f *decFnInfo) kInt8(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(8)) +} + +func (f *decFnInfo) kInt16(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(16)) +} + +func (f *decFnInfo) kFloat32(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(true)) +} + +func (f *decFnInfo) kFloat64(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(false)) +} + +func (f *decFnInfo) kUint8(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(8)) +} + +func (f *decFnInfo) kUint64(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(64)) +} + +func (f *decFnInfo) kUint(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUint32(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(32)) +} + +func (f *decFnInfo) kUint16(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(16)) +} + +// func (f *decFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
decode kPtr called - shouldn't get called") +// if rv.IsNil() { +// rv.Set(reflect.New(rv.Type().Elem())) +// } +// f.d.decodeValue(rv.Elem()) +// } + +func (f *decFnInfo) kInterface(rv reflect.Value) { + // debugf("\t===> kInterface") + if !rv.IsNil() { + f.d.decodeValue(rv.Elem()) + return + } + // nil interface: + // use some hieristics to set the nil interface to an + // appropriate value based on the first byte read (byte descriptor bd) + v, vt, decodeFurther := f.dd.decodeNaked() + if vt == valueTypeNil { + return + } + // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc) + // if non-nil value in stream. + if num := f.ti.rt.NumMethod(); num > 0 { + decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", + f.ti.rt, num) + } + var rvn reflect.Value + var useRvn bool + switch vt { + case valueTypeMap: + if f.d.h.MapType == nil { + var m2 map[interface{}]interface{} + v = &m2 + } else { + rvn = reflect.New(f.d.h.MapType).Elem() + useRvn = true + } + case valueTypeArray: + if f.d.h.SliceType == nil { + var m2 []interface{} + v = &m2 + } else { + rvn = reflect.New(f.d.h.SliceType).Elem() + useRvn = true + } + case valueTypeExt: + re := v.(*RawExt) + var bfn func(reflect.Value, []byte) error + rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) + if bfn == nil { + rvn = reflect.ValueOf(*re) + } else if fnerr := bfn(rvn, re.Data); fnerr != nil { + panic(fnerr) + } + rv.Set(rvn) + return + } + if decodeFurther { + if useRvn { + f.d.decodeValue(rvn) + } else if v != nil { + // this v is a pointer, so we need to dereference it when done + f.d.decode(v) + rvn = reflect.ValueOf(v).Elem() + useRvn = true + } + } + if useRvn { + rv.Set(rvn) + } else if v != nil { + rv.Set(reflect.ValueOf(v)) + } +} + +func (f *decFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { + containerLen := f.dd.readMapLen() + if containerLen == 0 { + return + } + tisfi := fti.sfi + for j := 0; j < containerLen; j++ { + // var rvkencname string + // ddecode(&rvkencname) + f.dd.initReadNext() + rvkencname := f.dd.decodeString() + // rvksi := ti.getForEncName(rvkencname) + if k := fti.indexForEncName(rvkencname); k > -1 { + sfik := tisfi[k] + if sfik.i != -1 { + f.d.decodeValue(rv.Field(int(sfik.i))) + } else { + f.d.decEmbeddedField(rv, sfik.is) + } + // f.d.decodeValue(ti.field(k, rv)) + } else { + if f.d.h.ErrorIfNoField { + decErr("No matching struct field found when decoding stream map with key: %v", + rvkencname) + } else { + var nilintf0 interface{} + f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } + } + } + } else if currEncodedType == valueTypeArray { + containerLen := f.dd.readArrayLen() + if containerLen == 0 { + return + } + for j, si := range fti.sfip { + if j == containerLen { + break + } + if si.i != -1 { + f.d.decodeValue(rv.Field(int(si.i))) + } else { + f.d.decEmbeddedField(rv, si.is) + } + } + if containerLen > len(fti.sfip) { + // read remaining values and throw away + for j := len(fti.sfip); j < containerLen; j++ { + var nilintf0 interface{} + f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) + } + } + } else { + decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", + currEncodedType) + } +} + +func (f *decFnInfo) kSlice(rv reflect.Value) { + // A slice can be set from a map or array in stream. 
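+	// The cases below first handle a []byte destination encoded as stream
+	// bytes/string, then short-circuit a few common slice types without
+	// reflection, and finally fall back to a reflection loop that grows or
+	// reallocates the destination to the container length before decoding
+	// each element in place.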
+ currEncodedType := f.dd.currentEncodedType() + + switch currEncodedType { + case valueTypeBytes, valueTypeString: + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { + rv.SetBytes(bs2) + } + return + } + } + + if shortCircuitReflectToFastPath && rv.CanAddr() { + switch f.ti.rtid { + case intfSliceTypId: + f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) + return + case uint64SliceTypId: + f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) + return + case int64SliceTypId: + f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) + return + case strSliceTypId: + f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) + return + } + } + + containerLen, containerLenS := decContLens(f.dd, currEncodedType) + + // an array can never return a nil slice. so no need to check f.array here. + + if rv.IsNil() { + rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) + } + + if containerLen == 0 { + return + } + + if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap { + if f.array { // !rv.CanSet() + decErr(msgDecCannotExpandArr, rvcap, containerLenS) + } + rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) + if rvlen > 0 { + reflect.Copy(rvn, rv) + } + rv.Set(rvn) + } else if containerLenS > rvlen { + rv.SetLen(containerLenS) + } + + for j := 0; j < containerLenS; j++ { + f.d.decodeValue(rv.Index(j)) + } +} + +func (f *decFnInfo) kArray(rv reflect.Value) { + // f.d.decodeValue(rv.Slice(0, rv.Len())) + f.kSlice(rv.Slice(0, rv.Len())) +} + +func (f *decFnInfo) kMap(rv reflect.Value) { + if shortCircuitReflectToFastPath && rv.CanAddr() { + switch f.ti.rtid { + case mapStrIntfTypId: + f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{})) + return + case mapIntfIntfTypId: + f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{})) + return + case mapInt64IntfTypId: + f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{})) + return + case mapUint64IntfTypId: + f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{})) + return + } + } + + containerLen := f.dd.readMapLen() + + if rv.IsNil() { + rv.Set(reflect.MakeMap(f.ti.rt)) + } + + if containerLen == 0 { + return + } + + ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem() + ktypeId := reflect.ValueOf(ktype).Pointer() + for j := 0; j < containerLen; j++ { + rvk := reflect.New(ktype).Elem() + f.d.decodeValue(rvk) + + // special case if a byte array. + // if ktype == intfTyp { + if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(string(rvk.Bytes())) + } + } + rvv := rv.MapIndex(rvk) + if !rvv.IsValid() { + rvv = reflect.New(vtype).Elem() + } + + f.d.decodeValue(rvv) + rv.SetMapIndex(rvk, rvv) + } +} + +// ---------------------------------------- + +type decFn struct { + i *decFnInfo + f func(*decFnInfo, reflect.Value) +} + +// A Decoder reads and decodes an object from an input stream in the codec format. +type Decoder struct { + r decReader + d decDriver + h *BasicHandle + f map[uintptr]decFn + x []uintptr + s []decFn +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Reader, bytes.Buffer). 
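+//
+// For example (sketch; h is any Handle, and conn is any io.Reader such as a net.Conn):
+//    dec := codec.NewDecoder(bufio.NewReader(conn), h)
+//    var v interface{}
+//    err = dec.Decode(&v)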
+func NewDecoder(r io.Reader, h Handle) *Decoder { + z := ioDecReader{ + r: r, + } + z.br, _ = r.(io.ByteReader) + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + z := bytesDecReader{ + b: in, + a: len(in), + } + return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} +} + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. +// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). 
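+//
+// For example (sketch), decoding a stream map {"Name": "x"} into
+//   type T struct{ Name, City string }
+//   t := T{City: "SF"}
+//   err = dec.Decode(&t)
+// updates t.Name only; t.City keeps its prior value.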
+// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErr(&err) + d.decode(v) + return +} + +func (d *Decoder) decode(iv interface{}) { + d.d.initReadNext() + + switch v := iv.(type) { + case nil: + decErr("Cannot decode into nil.") + + case reflect.Value: + d.chkPtrValue(v) + d.decodeValue(v.Elem()) + + case *string: + *v = d.d.decodeString() + case *bool: + *v = d.d.decodeBool() + case *int: + *v = int(d.d.decodeInt(intBitsize)) + case *int8: + *v = int8(d.d.decodeInt(8)) + case *int16: + *v = int16(d.d.decodeInt(16)) + case *int32: + *v = int32(d.d.decodeInt(32)) + case *int64: + *v = d.d.decodeInt(64) + case *uint: + *v = uint(d.d.decodeUint(uintBitsize)) + case *uint8: + *v = uint8(d.d.decodeUint(8)) + case *uint16: + *v = uint16(d.d.decodeUint(16)) + case *uint32: + *v = uint32(d.d.decodeUint(32)) + case *uint64: + *v = d.d.decodeUint(64) + case *float32: + *v = float32(d.d.decodeFloat(true)) + case *float64: + *v = d.d.decodeFloat(false) + case *[]byte: + *v, _ = d.d.decodeBytes(*v) + + case *[]interface{}: + d.decSliceIntf(v, valueTypeInvalid, false) + case *[]uint64: + d.decSliceUint64(v, valueTypeInvalid, false) + case *[]int64: + d.decSliceInt64(v, valueTypeInvalid, false) + case *[]string: + d.decSliceStr(v, valueTypeInvalid, false) + case *map[string]interface{}: + d.decMapStrIntf(v) + case *map[interface{}]interface{}: + d.decMapIntfIntf(v) + case *map[uint64]interface{}: + d.decMapUint64Intf(v) + case *map[int64]interface{}: + d.decMapInt64Intf(v) + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem()) + + default: + rv := reflect.ValueOf(iv) + d.chkPtrValue(rv) + d.decodeValue(rv.Elem()) + } +} + +func (d *Decoder) decodeValue(rv reflect.Value) { + d.d.initReadNext() + + if d.d.tryDecodeAsNil() { + // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) + if rv.Kind() == reflect.Ptr { + if !rv.IsNil() { + rv.Set(reflect.Zero(rv.Type())) + } + return + } + // for rv.Kind() == reflect.Ptr { + // rv = rv.Elem() + // } + if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid + rv.Set(reflect.Zero(rv.Type())) + } + return + } + + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // retrieve or register a focus'ed function for this type + // to eliminate need to do the retrieval multiple times + + // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } + var fn decFn + var ok bool + if useMapForCodecCache { + fn, ok = d.f[rtid] + } else { + for i, v := range d.x { + if v == rtid { + fn, ok = d.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new dec fn for type: %v\n", rt) + fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} + fn.i = &fi + // An extension can be registered for any type, regardless of the Kind + // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. + // + // We can't check if it's an extension byte here first, because the user may have + // registered a pointer or non-pointer type, meaning we may have to recurse first + // before matching a mapped type, even though the extension byte is already detected. + // + // NOTE: if decoding into a nil interface{}, we return a non-nil + // value except even if the container registers a length of 0. 
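+	// Resolution order for the decode function: RawExt, driver builtin types,
+	// a registered extension for this type, binary unmarshaling (if enabled
+	// and supported by the type), and finally a handler selected by the
+	// reflect.Kind.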
+ if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if d.d.isBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*decFnInfo).ext + } else if supportBinaryMarshal && fi.ti.unm { + fn.f = (*decFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Slice: + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.array = true + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } + } + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]decFn, 16) + } + d.f[rtid] = fn + } else { + d.s = append(d.s, fn) + d.x = append(d.x, rtid) + } + } + + fn.f(fn.i, rv) + + return +} + +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + return + } + if !rv.IsValid() { + decErr("Cannot decode into a zero (ie invalid) reflect.Value") + } + if !rv.CanInterface() { + decErr("Cannot decode into a value without an interface: %v", rv) + } + rvi := rv.Interface() + decErr("Cannot decode into non-pointer or nil pointer. 
Got: %v, %T, %v", + rv.Kind(), rvi, rvi) +} + +func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { + // d.decodeValue(rv.FieldByIndex(index)) + // nil pointers may be here; so reproduce FieldByIndex logic + enhancements + for _, j := range index { + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + // If a pointer, it must be a pointer to struct (based on typeInfo contract) + rv = rv.Elem() + } + rv = rv.Field(j) + } + d.decodeValue(rv) +} + +// -------------------------------------------------- + +// short circuit functions for common maps and slices + +func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]interface{}, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]interface{}, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + d.decode(&s[j]) + } + *v = s +} + +func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]int64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]int64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeInt(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]uint64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]uint64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeUint(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]string, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]string, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeString() + } + *v = s +} + +func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[interface{}]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + var mk interface{} + d.decode(&mk) + // special case if a byte array. 
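+		// A []byte is not comparable and so cannot be used as a map key;
+		// convert it to a string before indexing into the map.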
+ if bv, bok := mk.([]byte); bok { + mk = string(bv) + } + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[int64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeInt(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[uint64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeUint(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[string]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeString() + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +// ---------------------------------------- + +func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { + if currEncodedType == valueTypeInvalid { + currEncodedType = dd.currentEncodedType() + } + switch currEncodedType { + case valueTypeArray: + containerLen = dd.readArrayLen() + containerLenS = containerLen + case valueTypeMap: + containerLen = dd.readMapLen() + containerLenS = containerLen * 2 + default: + decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", + currEncodedType) + } + return +} + +func decErr(format string, params ...interface{}) { + doPanic(msgTagDec, format, params...) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go new file mode 100644 index 0000000..4914be0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go @@ -0,0 +1,1001 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" +) + +const ( + // Some tagging information for error messages. + msgTagEnc = "codec.encoder" + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 +) + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracting writing to a byte array or to an io.Writer. 
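+// Two implementations follow: ioEncWriter, which wraps an io.Writer, and
+// bytesEncWriter, which appends directly to a byte slice (as used by
+// NewEncoderBytes).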
+type encWriter interface { + writeUint16(uint16) + writeUint32(uint32) + writeUint64(uint64) + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + atEndOfEncode() +} + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + isBuiltinType(rt uintptr) bool + encodeBuiltin(rt uintptr, v interface{}) + encodeNil() + encodeInt(i int64) + encodeUint(i uint64) + encodeBool(b bool) + encodeFloat32(f float32) + encodeFloat64(f float64) + encodeExtPreamble(xtag byte, length int) + encodeArrayPreamble(length int) + encodeMapPreamble(length int) + encodeString(c charEncoding, v string) + encodeSymbol(v string) + encodeStringBytes(c charEncoding, v []byte) + //TODO + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) +} + +type ioEncWriterWriter interface { + WriteByte(c byte) error + WriteString(s string) (n int, err error) + Write(p []byte) (n int, err error) +} + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +type EncodeOptions struct { + // Encode a struct as an array, and not as a map. + StructToArray bool + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. + // + // Sample values: + // AsSymbolNone + // AsSymbolAll + // AsSymbolMapStringKeys + // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + AsSymbols AsSymbolFlag +} + +// --------------------------------------------- + +type simpleIoEncWriterWriter struct { + w io.Writer + bw io.ByteWriter + sw ioEncStringWriter +} + +func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { + if o.bw != nil { + return o.bw.WriteByte(c) + } + _, err = o.w.Write([]byte{c}) + return +} + +func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { + if o.sw != nil { + return o.sw.WriteString(s) + } + return o.w.Write([]byte(s)) +} + +func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { + return o.w.Write(p) +} + +// ---------------------------------------- + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w ioEncWriterWriter + x [8]byte // temp byte array re-used internally for efficiency +} + +func (z *ioEncWriter) writeUint16(v uint16) { + bigen.PutUint16(z.x[:2], v) + z.writeb(z.x[:2]) +} + +func (z *ioEncWriter) writeUint32(v uint32) { + bigen.PutUint32(z.x[:4], v) + z.writeb(z.x[:4]) +} + +func (z *ioEncWriter) writeUint64(v uint64) { + bigen.PutUint64(z.x[:8], v) + z.writeb(z.x[:8]) +} + +func (z *ioEncWriter) writeb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := z.w.Write(bs) + if err != nil { + panic(err) + } + if n != len(bs) { + encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n) + } +} + +func (z *ioEncWriter) writestr(s string) { + n, err := z.w.WriteString(s) + if err != nil { + panic(err) + } + if n != len(s) { + encErr("write: Incorrect num bytes written. 
Expecting: %v, Wrote: %v", len(s), n) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.w.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) +} + +func (z *ioEncWriter) atEndOfEncode() {} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. +type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeUint16(v uint16) { + c := z.grow(2) + z.b[c] = byte(v >> 8) + z.b[c+1] = byte(v) +} + +func (z *bytesEncWriter) writeUint32(v uint32) { + c := z.grow(4) + z.b[c] = byte(v >> 24) + z.b[c+1] = byte(v >> 16) + z.b[c+2] = byte(v >> 8) + z.b[c+3] = byte(v) +} + +func (z *bytesEncWriter) writeUint64(v uint64) { + c := z.grow(8) + z.b[c] = byte(v >> 56) + z.b[c+1] = byte(v >> 48) + z.b[c+2] = byte(v >> 40) + z.b[c+3] = byte(v >> 32) + z.b[c+4] = byte(v >> 24) + z.b[c+5] = byte(v >> 16) + z.b[c+6] = byte(v >> 8) + z.b[c+7] = byte(v) +} + +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) == 0 { + return + } + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + c := z.grow(1) + z.b[c] = b1 +} + +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + c := z.grow(2) + z.b[c] = b1 + z.b[c+1] = b2 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +func (z *bytesEncWriter) grow(n int) (oldcursor int) { + oldcursor = z.c + z.c = oldcursor + n + if z.c > cap(z.b) { + // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). + // However, it was too expensive, causing too many iterations of copy. 
+ // Using bytes.Buffer model was much better (2*cap + n) + bs := make([]byte, 2*cap(z.b)+n) + copy(bs, z.b[:oldcursor]) + z.b = bs + } else if z.c > len(z.b) { + z.b = z.b[:cap(z.b)] + } + return +} + +// --------------------------------------------- + +type encFnInfo struct { + ti *typeInfo + e *Encoder + ee encDriver + xfFn func(reflect.Value) ([]byte, error) + xfTag byte +} + +func (f *encFnInfo) builtin(rv reflect.Value) { + f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) +} + +func (f *encFnInfo) rawExt(rv reflect.Value) { + f.e.encRawExt(rv.Interface().(RawExt)) +} + +func (f *encFnInfo) ext(rv reflect.Value) { + bs, fnerr := f.xfFn(rv) + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + return + } + if f.e.hh.writeExt() { + f.ee.encodeExtPreamble(f.xfTag, len(bs)) + f.e.w.writeb(bs) + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } + +} + +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryMarshaler + if f.ti.mIndir == 0 { + bm = rv.Interface().(binaryMarshaler) + } else if f.ti.mIndir == -1 { + bm = rv.Addr().Interface().(binaryMarshaler) + } else { + for j, k := int8(0), f.ti.mIndir; j < k; j++ { + if rv.IsNil() { + f.ee.encodeNil() + return + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryMarshaler) + } + // debugf(">>>> binaryMarshaler: %T", rv.Interface()) + bs, fnerr := bm.MarshalBinary() + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } +} + +func (f *encFnInfo) kBool(rv reflect.Value) { + f.ee.encodeBool(rv.Bool()) +} + +func (f *encFnInfo) kString(rv reflect.Value) { + f.ee.encodeString(c_UTF8, rv.String()) +} + +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.ee.encodeFloat64(rv.Float()) +} + +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.ee.encodeFloat32(float32(rv.Float())) +} + +func (f *encFnInfo) kInt(rv reflect.Value) { + f.ee.encodeInt(rv.Int()) +} + +func (f *encFnInfo) kUint(rv reflect.Value) { + f.ee.encodeUint(rv.Uint()) +} + +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.ee.encodeNil() +} + +func (f *encFnInfo) kErr(rv reflect.Value) { + encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case intfSliceTypId: + f.e.encSliceIntf(rv.Interface().([]interface{})) + return + case strSliceTypId: + f.e.encSliceStr(rv.Interface().([]string)) + return + case uint64SliceTypId: + f.e.encSliceUint64(rv.Interface().([]uint64)) + return + case int64SliceTypId: + f.e.encSliceInt64(rv.Interface().([]int64)) + return + } + } + + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + f.ee.encodeStringBytes(c_RAW, rv.Bytes()) + return + } + + l := rv.Len() + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kArray(rv reflect.Value) { + // We cannot share kSlice method, because the array may be non-addressable. + // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". 
+ // So we have to duplicate the functionality here. + // f.e.encodeValue(rv.Slice(0, rv.Len())) + // f.kSlice(rv.Slice(0, rv.Len())) + + l := rv.Len() + // Handle an array of bytes specially (in line with what is done for slices) + if f.ti.rt.Elem().Kind() == reflect.Uint8 { + if l == 0 { + f.ee.encodeStringBytes(c_RAW, nil) + return + } + var bs []byte + if rv.CanAddr() { + bs = rv.Slice(0, l).Bytes() + } else { + bs = make([]byte, l) + for i := 0; i < l; i++ { + bs[i] = byte(rv.Index(i).Uint()) + } + } + f.ee.encodeStringBytes(c_RAW, bs) + return + } + + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + newlen := len(fti.sfi) + rvals := make([]reflect.Value, newlen) + var encnames []string + e := f.e + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + encnames = make([]string, newlen) + } + newlen = 0 + for _, si := range tisfi { + if si.i != -1 { + rvals[newlen] = rv.Field(int(si.i)) + } else { + rvals[newlen] = rv.FieldByIndex(si.is) + } + if toMap { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + continue + } + encnames[newlen] = si.encName + } else { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + rvals[newlen] = reflect.Value{} //encode as nil + } + } + newlen++ + } + + // debugf(">>>> kStruct: newlen: %v", newlen) + if toMap { + ee := f.ee //don't dereference everytime + ee.encodeMapPreamble(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + if asSymbols { + ee.encodeSymbol(encnames[j]) + } else { + ee.encodeString(c_UTF8, encnames[j]) + } + e.encodeValue(rvals[j]) + } + } else { + f.ee.encodeArrayPreamble(newlen) + for j := 0; j < newlen; j++ { + e.encodeValue(rvals[j]) + } + } +} + +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called") +// if rv.IsNil() { +// f.ee.encodeNil() +// return +// } +// f.e.encodeValue(rv.Elem()) +// } + +func (f *encFnInfo) kInterface(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + f.e.encodeValue(rv.Elem()) +} + +func (f *encFnInfo) kMap(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case mapIntfIntfTypId: + f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) + return + case mapStrIntfTypId: + f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) + return + case mapStrStrTypId: + f.e.encMapStrStr(rv.Interface().(map[string]string)) + return + case mapInt64IntfTypId: + f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) + return + case mapUint64IntfTypId: + f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) + return + } + } + + l := rv.Len() + f.ee.encodeMapPreamble(l) + if l == 0 { + return + } + // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String + keyTypeIsString := f.ti.rt.Key() == stringTyp + var asSymbols bool + if keyTypeIsString { + asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } + mks := rv.MapKeys() + // for j, lmks := 0, len(mks); j < lmks; j++ { + for j := range mks { + if keyTypeIsString { + if asSymbols { + f.ee.encodeSymbol(mks[j].String()) + } else { + f.ee.encodeString(c_UTF8, mks[j].String()) + } + } else { + f.e.encodeValue(mks[j]) + } + f.e.encodeValue(rv.MapIndex(mks[j])) + } + +} + +// -------------------------------------------------- + +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type encFn struct { + i *encFnInfo + f func(*encFnInfo, reflect.Value) +} + +// -------------------------------------------------- + +// An Encoder writes an object to an output stream in the codec format. +type Encoder struct { + w encWriter + e encDriver + h *BasicHandle + hh Handle + f map[uintptr]encFn + x []uintptr + s []encFn +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + ww, ok := w.(ioEncWriterWriter) + if !ok { + sww := simpleIoEncWriterWriter{w: w} + sww.bw, _ = w.(io.ByteWriter) + sww.sw, _ = w.(ioEncStringWriter) + ww = &sww + //ww = bufio.NewWriterSize(w, defEncByteBufSize) + } + z := ioEncWriter{ + w: ww, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. +func NewEncoderBytes(out *[]byte, h Handle) *Encoder { + in := *out + if in == nil { + in = make([]byte, defEncByteBufSize) + } + z := bytesEncWriter{ + b: in, + out: out, + } + return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} +} + +// Encode writes an object into a stream in the codec format. +// +// Encoding can be configured via the "codec" struct tag for the fields. +// +// The "codec" key in struct field's tag value is the key name, +// followed by an optional comma and options. 
+// +// To set an option on all fields (e.g. omitempty on all fields), you +// can create a field called _struct, and set flags on it. +// +// Struct values "usually" encode as maps. Each exported struct field is encoded unless: +// - the field's codec tag is "-", OR +// - the field is empty and its codec tag specifies the "omitempty" option. +// +// When encoding as a map, the first string in the tag (before the comma) +// is the map key string to use when encoding. +// +// However, struct values may encode as arrays. This happens when: +// - StructToArray Encode option is set, OR +// - the codec tag on the _struct field sets the "toarray" option +// +// Values with types that implement MapBySlice are encoded as stream maps. +// +// The empty values (for omitempty option) are false, 0, any nil pointer +// or interface value, and any array, slice, map, or string of length zero. +// +// Anonymous fields are encoded inline if no struct tag is present. +// Else they are encoded as regular fields. +// +// Examples: +// +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field +// //and encode struct as an array +// } +// +// The mode of encoding is based on the type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. 
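+//
+// A minimal end-to-end sketch (handle is any configured Handle; MyStruct is
+// the illustrative type above):
+//    var buf []byte
+//    enc := codec.NewEncoderBytes(&buf, handle)
+//    err = enc.Encode(MyStruct{Field2: 42})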
+func (e *Encoder) Encode(v interface{}) (err error) { + defer panicToErr(&err) + e.encode(v) + e.w.atEndOfEncode() + return +} + +func (e *Encoder) encode(iv interface{}) { + switch v := iv.(type) { + case nil: + e.e.encodeNil() + + case reflect.Value: + e.encodeValue(v) + + case string: + e.e.encodeString(c_UTF8, v) + case bool: + e.e.encodeBool(v) + case int: + e.e.encodeInt(int64(v)) + case int8: + e.e.encodeInt(int64(v)) + case int16: + e.e.encodeInt(int64(v)) + case int32: + e.e.encodeInt(int64(v)) + case int64: + e.e.encodeInt(v) + case uint: + e.e.encodeUint(uint64(v)) + case uint8: + e.e.encodeUint(uint64(v)) + case uint16: + e.e.encodeUint(uint64(v)) + case uint32: + e.e.encodeUint(uint64(v)) + case uint64: + e.e.encodeUint(v) + case float32: + e.e.encodeFloat32(v) + case float64: + e.e.encodeFloat64(v) + + case []interface{}: + e.encSliceIntf(v) + case []string: + e.encSliceStr(v) + case []int64: + e.encSliceInt64(v) + case []uint64: + e.encSliceUint64(v) + case []uint8: + e.e.encodeStringBytes(c_RAW, v) + + case map[interface{}]interface{}: + e.encMapIntfIntf(v) + case map[string]interface{}: + e.encMapStrIntf(v) + case map[string]string: + e.encMapStrStr(v) + case map[int64]interface{}: + e.encMapInt64Intf(v) + case map[uint64]interface{}: + e.encMapUint64Intf(v) + + case *string: + e.e.encodeString(c_UTF8, *v) + case *bool: + e.e.encodeBool(*v) + case *int: + e.e.encodeInt(int64(*v)) + case *int8: + e.e.encodeInt(int64(*v)) + case *int16: + e.e.encodeInt(int64(*v)) + case *int32: + e.e.encodeInt(int64(*v)) + case *int64: + e.e.encodeInt(*v) + case *uint: + e.e.encodeUint(uint64(*v)) + case *uint8: + e.e.encodeUint(uint64(*v)) + case *uint16: + e.e.encodeUint(uint64(*v)) + case *uint32: + e.e.encodeUint(uint64(*v)) + case *uint64: + e.e.encodeUint(*v) + case *float32: + e.e.encodeFloat32(*v) + case *float64: + e.e.encodeFloat64(*v) + + case *[]interface{}: + e.encSliceIntf(*v) + case *[]string: + e.encSliceStr(*v) + case *[]int64: + e.encSliceInt64(*v) + case *[]uint64: + e.encSliceUint64(*v) + case *[]uint8: + e.e.encodeStringBytes(c_RAW, *v) + + case *map[interface{}]interface{}: + e.encMapIntfIntf(*v) + case *map[string]interface{}: + e.encMapStrIntf(*v) + case *map[string]string: + e.encMapStrStr(*v) + case *map[int64]interface{}: + e.encMapInt64Intf(*v) + case *map[uint64]interface{}: + e.encMapUint64Intf(*v) + + default: + e.encodeValue(reflect.ValueOf(iv)) + } +} + +func (e *Encoder) encodeValue(rv reflect.Value) { + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + e.e.encodeNil() + return + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } + var fn encFn + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i, v := range e.x { + if v == rtid { + fn, ok = e.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new enc fn for type: %v\n", rt) + fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} + fn.i = &fi + if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.isBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*encFnInfo).ext + } else if supportBinaryMarshal && fi.ti.m { + fn.f = (*encFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + 
fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Slice: + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fn.f = (*encFnInfo).kArray + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + // case reflect.Ptr: + // fn.f = (*encFnInfo).kPtr + case reflect.Interface: + fn.f = (*encFnInfo).kInterface + case reflect.Map: + fn.f = (*encFnInfo).kMap + default: + fn.f = (*encFnInfo).kErr + } + } + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]encFn, 16) + } + e.f[rtid] = fn + } else { + e.s = append(e.s, fn) + e.x = append(e.x, rtid) + } + } + + fn.f(fn.i, rv) + +} + +func (e *Encoder) encRawExt(re RawExt) { + if re.Data == nil { + e.e.encodeNil() + return + } + if e.hh.writeExt() { + e.e.encodeExtPreamble(re.Tag, len(re.Data)) + e.w.writeb(re.Data) + } else { + e.e.encodeStringBytes(c_RAW, re.Data) + } +} + +// --------------------------------------------- +// short circuit functions for common maps and slices + +func (e *Encoder) encSliceIntf(v []interface{}) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.encode(v2) + } +} + +func (e *Encoder) encSliceStr(v []string) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encSliceInt64(v []int64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeInt(v2) + } +} + +func (e *Encoder) encSliceUint64(v []uint64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeUint(v2) + } +} + +func (e *Encoder) encMapStrStr(v map[string]string) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encMapStrIntf(v map[string]interface{}) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.encode(v2) + } +} + +func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeInt(k2) + e.encode(v2) + } +} + +func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeUint(uint64(k2)) + e.encode(v2) + } +} + +func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } +} + +// ---------------------------------------- + +func encErr(format string, params ...interface{}) { + doPanic(msgTagEnc, format, params...) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go new file mode 100644 index 0000000..e6dc056 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go @@ -0,0 +1,589 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. 
+ +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" + "unicode" + "unicode/utf8" +) + +const ( + structTagName = "codec" + + // Support + // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) + // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error + // This constant flag will enable or disable it. + supportBinaryMarshal = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + useMapForCodecCache = false + + // For some common container types, we can short-circuit an elaborate + // reflection dance and call encode/decode directly. + // The currently supported types are: + // - slices of strings, or id's (int64,uint64) or interfaces. + // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf + shortCircuitReflectToFastPath = true + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true +) + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + valueTypeInvalid = 0xff +) + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + cachedTypeInfo = make(map[uintptr]*typeInfo, 4) + cachedTypeInfoMutex sync.RWMutex + + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + strSliceTyp = reflect.TypeOf([]string(nil)) + boolSliceTyp = reflect.TypeOf([]bool(nil)) + uintSliceTyp = reflect.TypeOf([]uint(nil)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uint16SliceTyp = reflect.TypeOf([]uint16(nil)) + uint32SliceTyp = reflect.TypeOf([]uint32(nil)) + uint64SliceTyp = reflect.TypeOf([]uint64(nil)) + intSliceTyp = reflect.TypeOf([]int(nil)) + int8SliceTyp = reflect.TypeOf([]int8(nil)) + int16SliceTyp = reflect.TypeOf([]int16(nil)) + int32SliceTyp = reflect.TypeOf([]int32(nil)) + int64SliceTyp = reflect.TypeOf([]int64(nil)) + float32SliceTyp = reflect.TypeOf([]float32(nil)) + float64SliceTyp = reflect.TypeOf([]float64(nil)) + + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) + + mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) + mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) + mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) + mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() + + rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = 
reflect.ValueOf(timeTyp).Pointer() + + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() + + boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() + uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() + uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() + uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() + intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() + int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() + int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() + int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() + int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() + float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() + float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() + + mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() + mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() + mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() + mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() + // Id = reflect.ValueOf().Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() + binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +) + +type binaryUnmarshaler interface { + UnmarshalBinary(data []byte) error +} + +type binaryMarshaler interface { + MarshalBinary() (data []byte, err error) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + extHandle + EncodeOptions + DecodeOptions +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + writeExt() bool + getBasicHandle() *BasicHandle + newEncDriver(w encWriter) encDriver + newDecDriver(r decReader) decDriver +} + +// RawExt represents raw unprocessed extension data. +type RawExt struct { + Tag byte + Data []byte +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag byte + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +type extHandle []*extTypeTagFn + +// AddExt registers an encode and decode function for a reflect.Type. +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. 
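+//
+// For example (sketch; MyType, the tag value 0x1, and the single-byte wire
+// format are all illustrative):
+//    type MyType struct{ N uint64 }
+//    err = handle.AddExt(reflect.TypeOf(MyType{}), 0x1,
+//        func(rv reflect.Value) ([]byte, error) {
+//            return []byte{byte(rv.Field(0).Uint())}, nil
+//        },
+//        func(rv reflect.Value, bs []byte) error {
+//            rv.Field(0).SetUint(uint64(bs[0]))
+//            return nil
+//        })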
+func (o *extHandle) AddExt( + rt reflect.Type, + tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error, +) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + // o cannot be nil, since it is always embedded in a Handle. + // if nil, let it panic. + // if o == nil { + // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") + // return + // } + + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.encFn, v.decFn = tag, encfn, decfn + return + } + } + + *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + for _, v := range o { + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { + for _, v := range o { + if v.tag == tag { + return v + } + } + return nil +} + +func (o extHandle) getDecodeExtForTag(tag byte) ( + rv reflect.Value, fn func(reflect.Value, []byte) error) { + if x := o.getExtForTag(tag); x != nil { + // ext is only registered for base + rv = reflect.New(x.rt).Elem() + fn = x.decFn + } + return +} + +func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.decFn + } + return +} + +func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.encFn + } + return +} + +type structFieldInfo struct { + encName string // encode name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. + + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? + + // tag string // tag + // name string // field name + // encNameBs []byte // encoded name as byte stream + // ikind int // kind of the field as an int i.e. int(reflect.Kind) +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + if fname == "" { + panic("parseStructFieldInfo: No Field Name") + } + si := structFieldInfo{ + // name: fname, + encName: fname, + // tag: stag, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.omitEmpty = true + case "toarray": + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. 
Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + m bool // base type (T or *T) is a binaryMarshaler + unm bool // base type (T or *T) is a binaryUnmarshaler + mIndir int8 // number of indirections to get to binaryMarshaler type + unmIndir int8 // number of indirections to get to binaryUnmarshaler type + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 +} + +func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + cachedTypeInfoMutex.RLock() + pti, ok = cachedTypeInfo[rtid] + cachedTypeInfoMutex.RUnlock() + if ok { + return + } + + cachedTypeInfoMutex.Lock() + defer cachedTypeInfoMutex.Unlock() + if pti, ok = cachedTypeInfo[rtid]; ok { + return + } + + ti := typeInfo{rt: rt, rtid: rtid} + pti = &ti + + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.m, ti.mIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.unm, ti.unmIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var siInfo *structFieldInfo + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) + ti.toArray = siInfo.toArray + } + sfip := make([]*structFieldInfo, 0, rt.NumField()) + rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) + + // // try to put all si close together + // const tryToPutAllStructFieldInfoTogether = true + // if tryToPutAllStructFieldInfoTogether { + // sfip2 := make([]structFieldInfo, len(sfip)) + // for i, si := range sfip { + // sfip2[i] = *si + // } + // for i := range sfip { + // sfip[i] = &sfip2[i] + // } + // } + + ti.sfip = make([]*structFieldInfo, len(sfip)) + ti.sfi = make([]*structFieldInfo, len(sfip)) + copy(ti.sfip, sfip) + sort.Sort(sfiSortedByEncName(sfip)) + copy(ti.sfi, sfip) + } + // sfi = sfip + cachedTypeInfo[rtid] = pti + return +} + +func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, + sfi *[]*structFieldInfo, siInfo *structFieldInfo, +) { + // for rt.Kind() == reflect.Ptr { + // // indexstack = append(indexstack, 0) + // rt = rt.Elem() + // } + for j := 0; j < rt.NumField(); j++ { + 
f := rt.Field(j) + stag := f.Tag.Get(structTagName) + if stag == "-" { + continue + } + if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + continue + } + // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. + if f.Anonymous && stag == "" { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) + continue + } + } + // do not let fields with same name in embedded structs override field at higher level. + // this must be done after anonymous check, to allow anonymous field + // still include their child fields + if _, ok := fnameToHastag[f.Name]; ok { + continue + } + si := parseStructFieldInfo(f.Name, stag) + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if siInfo != nil { + if siInfo.omitEmpty { + si.omitEmpty = true + } + } + *sfi = append(*sfi, si) + fnameToHastag[f.Name] = stag != "" + } +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } +} + +func doPanic(tag string, format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = tag + copy(params2[1:], params) + panic(fmt.Errorf("%s: "+format, params2...)) +} + +func checkOverflowFloat32(f float64, doCheck bool) { + if !doCheck { + return + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() + f2 := f + if f2 < 0 { + f2 = -f + } + if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { + decErr("Overflow float32 value: %v", f2) + } +} + +func checkOverflow(ui uint64, i int64, bitsize uint8) { + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize == 0 { + return + } + if i != 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + if ui != 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go new file mode 100644 index 0000000..58417da --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -0,0 +1,127 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). 
+ +import ( + "errors" + "fmt" + "math" + "reflect" +) + +var ( + raisePanicAfterRecover = false + debugging = true +) + +func panicValToErr(panicVal interface{}, err *error) { + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + if raisePanicAfterRecover { + panic(panicVal) + } + return +} + +func isEmptyValueDeref(v reflect.Value, deref bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValueDeref(v.Elem(), deref) + } else { + return v.IsNil() + } + case reflect.Struct: + // return true if all fields are empty. else return false. + + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. + // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !isEmptyValueDeref(v.Field(i), deref) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + return isEmptyValueDeref(v, true) +} + +func debugf(format string, args ...interface{}) { + if debugging { + if len(format) == 0 || format[len(format)-1] != '\n' { + format = format + "\n" + } + fmt.Printf(format, args...) + } +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go new file mode 100644 index 0000000..da0500d --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go @@ -0,0 +1,816 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. 
+ +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + w encWriter + h *MsgpackHandle +} + +func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} + +func (e *msgpackEncDriver) encodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) encodeInt(i int64) { + + switch { + case i >= 0: + e.encodeUint(uint64(i)) + case i >= -32: + e.w.writen1(byte(i)) + case i >= math.MinInt8: + e.w.writen2(mpInt8, byte(i)) + case i >= math.MinInt16: + e.w.writen1(mpInt16) + e.w.writeUint16(uint16(i)) + case i >= math.MinInt32: + e.w.writen1(mpInt32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpInt64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeUint(i uint64) { + switch { + case i <= math.MaxInt8: + e.w.writen1(byte(i)) + case i <= math.MaxUint8: + e.w.writen2(mpUint8, byte(i)) + case i <= math.MaxUint16: + e.w.writen1(mpUint16) + e.w.writeUint16(uint16(i)) + case i <= math.MaxUint32: + e.w.writen1(mpUint32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpUint64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) encodeFloat32(f float32) { + e.w.writen1(mpFloat) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) encodeFloat64(f float64) { + e.w.writen1(mpDouble) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + switch { + case l == 1: + e.w.writen2(mpFixExt1, xtag) + case l == 2: + e.w.writen2(mpFixExt2, xtag) + case l == 4: + e.w.writen2(mpFixExt4, xtag) + case l == 8: + e.w.writen2(mpFixExt8, xtag) + case l == 16: + e.w.writen2(mpFixExt16, xtag) + case l < 256: + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + case l < 65536: + e.w.writen1(mpExt16) + e.w.writeUint16(uint16(l)) + e.w.writen1(xtag) + default: + e.w.writen1(mpExt32) + e.w.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) encodeArrayPreamble(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) encodeMapPreamble(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) + } else { + e.writeContainerLen(msgpackContainerStr, len(s)) + } + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerStr, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + switch { + case ct.hasFixMin && l < ct.fixCutoff: + e.w.writen1(ct.bFixMin | byte(l)) + case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): + e.w.writen2(ct.b8, uint8(l)) + case l < 65536: + e.w.writen1(ct.b16) + e.w.writeUint16(uint16(l)) + default: + e.w.writen1(ct.b32) + e.w.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + r decReader + h *MsgpackHandle + bd byte + bdRead bool + bdType valueType +} + +func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + bd := d.bd + + switch bd { + case mpNil: + vt = valueTypeNil + d.bdRead = false + case mpFalse: + vt = valueTypeBool + v = false + case mpTrue: + vt = valueTypeBool + v = true + + case mpFloat: + vt = valueTypeFloat + v = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + vt = valueTypeFloat + v = math.Float64frombits(d.r.readUint64()) + + case mpUint8: + vt = valueTypeUint + v = uint64(d.r.readn1()) + case mpUint16: + vt = valueTypeUint + v = uint64(d.r.readUint16()) + case mpUint32: + vt = valueTypeUint + v = uint64(d.r.readUint32()) + case mpUint64: + vt = valueTypeUint + v = uint64(d.r.readUint64()) + + case mpInt8: + vt = valueTypeInt + v = int64(int8(d.r.readn1())) + case mpInt16: + vt = valueTypeInt + v = int64(int16(d.r.readUint16())) + case mpInt32: + vt = valueTypeInt + v = int64(int32(d.r.readUint32())) + case mpInt64: + vt = valueTypeInt + v = int64(int64(d.r.readUint64())) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + vt = valueTypeInt + v = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + vt = valueTypeInt + v = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + var rvm string + vt = valueTypeString + v = &rvm + } else { + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + } + decodeFurther = true + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + decodeFurther = true + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + vt = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + vt = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + clen := d.readExtLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(clen) + v = &re + vt = valueTypeExt + default: + decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(d.r.readUint16())) + case mpUint32: + i = int64(uint64(d.r.readUint32())) + case mpUint64: + i = int64(d.r.readUint64()) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(d.r.readUint16())) + case mpInt32: + i = int64(int32(d.r.readUint32())) + case mpInt64: + i = int64(d.r.readUint64()) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", 
msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(d.r.readUint16()) + case mpUint32: + ui = uint64(d.r.readUint32()) + case mpUint64: + ui = d.r.readUint64() + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt16: + if i := int64(int16(d.r.readUint16())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt32: + if i := int64(int32(d.r.readUint32())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt64: + if i := int64(d.r.readUint64()); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case mpFloat: + f = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + f = math.Float64frombits(d.r.readUint64()) + default: + f = float64(d.decodeInt(0)) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) decodeBool() (b bool) { + switch d.bd { + case mpFalse, 0: + // b = false + case mpTrue, 1: + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) decodeString() (s string) { + clen := d.readContainerLen(msgpackContainerStr) + if clen > 0 { + s = string(d.r.readn(clen)) + } + d.bdRead = false + return +} + +// Callers must check if changed=true (to decide whether to replace the one they have) +func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + // bytes can be decoded from msgpackContainerStr or msgpackContainerBin + var clen int + switch d.bd { + case mpBin8, mpBin16, mpBin32: + clen = d.readContainerLen(msgpackContainerBin) + default: + clen = d.readContainerLen(msgpackContainerStr) + } + // if clen < 0 { + // changed = true + // panic("length cannot be zero. 
this cannot be nil.") + // } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + // Return changed=true if length of passed slice diff from length of bytes in stream + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. +func (d *msgpackDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *msgpackDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + bd := d.bd + switch bd { + case mpNil: + d.bdType = valueTypeNil + case mpFalse, mpTrue: + d.bdType = valueTypeBool + case mpFloat, mpDouble: + d.bdType = valueTypeFloat + case mpUint8, mpUint16, mpUint32, mpUint64: + d.bdType = valueTypeUint + case mpInt8, mpInt16, mpInt32, mpInt64: + d.bdType = valueTypeInt + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + d.bdType = valueTypeInt + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + d.bdType = valueTypeInt + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + d.bdType = valueTypeString + } else { + d.bdType = valueTypeBytes + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + d.bdType = valueTypeBytes + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + d.bdType = valueTypeArray + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + d.bdType = valueTypeMap + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + d.bdType = valueTypeExt + default: + decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + } + return d.bdType +} + +func (d *msgpackDecDriver) tryDecodeAsNil() bool { + if d.bd == mpNil { + d.bdRead = false + return true + } + return false +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + switch { + case bd == mpNil: + clen = -1 // to represent nil + case bd == ct.b8: + clen = int(d.r.readn1()) + case bd == ct.b16: + clen = int(d.r.readUint16()) + case bd == ct.b32: + clen = int(d.r.readUint32()) + case (ct.bFixMin & bd) == ct.bFixMin: + clen = int(ct.bFixMin ^ bd) + default: + decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) readMapLen() int { + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) readArrayLen() int { + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(d.r.readUint16()) + case mpExt32: + clen = int(d.r.readUint32()) + default: + decErr("decoding ext bytes: found unexpected byte: %x", d.bd) + } + return +} + +func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + xbd := d.bd + switch { + case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: + xbs, _ = d.decodeBytes(nil) + case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, + xbd >= mpFixStrMin && xbd <= mpFixStrMax: + 
xbs = []byte(d.decodeString()) + default: + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool +} + +func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { + return &msgpackEncDriver{w: w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { + return &msgpackDecDriver{r: r, h: h} +} + +func (h *MsgpackHandle) writeExt() bool { + return h.WriteExt +} + +func (h *MsgpackHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.cls { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py new file mode 100644 index 0000000..e933838 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +# This will create golden files in a directory passed to it. +# A Test calls this internally to create the golden files +# So it can process them (so we don't have to checkin the files). 
+ +import msgpack, msgpackrpc, sys, os, threading + +def get_test_data_list(): + # get list with all primitive types, and a combo type + l0 = [ + -8, + -1616, + -32323232, + -6464646464646464, + 192, + 1616, + 32323232, + 6464646464646464, + 192, + -3232.0, + -6464646464.0, + 3232.0, + 6464646464.0, + False, + True, + None, + "someday", + "", + "bytestring", + 1328176922000002000, + -2206187877999998000, + 0, + -6795364578871345152 + ] + l1 = [ + { "true": True, + "false": False }, + { "true": "True", + "false": False, + "uint16(1616)": 1616 }, + { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], + "int32":32323232, "bool": True, + "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": "1234567890" }, + { True: "true", 8: False, "false": 0 } + ] + + l = [] + l.extend(l0) + l.append(l0) + l.extend(l1) + return l + +def build_test_data(destdir): + l = get_test_data_list() + for i in range(len(l)): + packer = msgpack.Packer() + serialized = packer.pack(l[i]) + f = open(os.path.join(destdir, str(i) + '.golden'), 'wb') + f.write(serialized) + f.close() + +def doRpcServer(port, stopTimeSec): + class EchoHandler(object): + def Echo123(self, msg1, msg2, msg3): + return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) + def EchoStruct(self, msg): + return ("%s" % msg) + + addr = msgpackrpc.Address('localhost', port) + server = msgpackrpc.Server(EchoHandler()) + server.listen(addr) + # run thread to stop it after stopTimeSec seconds if > 0 + if stopTimeSec > 0: + def myStopRpcServer(): + server.stop() + t = threading.Timer(stopTimeSec, myStopRpcServer) + t.start() + server.start() + +def doRpcClientToPythonSvc(port): + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("Echo123", "A1", "B2", "C3") + print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doRpcClientToGoSvc(port): + # print ">>>> port: ", port, " <<<<<" + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) + print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doMain(args): + if len(args) == 2 and args[0] == "testdata": + build_test_data(args[1]) + elif len(args) == 3 and args[0] == "rpc-server": + doRpcServer(int(args[1]), int(args[2])) + elif len(args) == 2 and args[0] == "rpc-client-python-service": + doRpcClientToPythonSvc(int(args[1])) + elif len(args) == 2 and args[0] == "rpc-client-go-service": + doRpcClientToGoSvc(int(args[1])) + else: + print("Usage: msgpack_test.py " + + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") + +if __name__ == "__main__": + doMain(sys.argv[1:]) + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go new file mode 100644 index 0000000..d014dbd --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "bufio" + "io" + "net/rpc" + "sync" +) + +// Rpc provides a rpc Server or Client Codec for rpc communication. 
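
As a usage sketch of the client side: wrap a dialed connection with one of the ClientCodec constructors declared below and hand it to net/rpc. The Arith service, its Args type, and the dial address are hypothetical, used only for illustration.

package main

import (
	"fmt"
	"net"
	"net/rpc"

	"github.com/hashicorp/go-msgpack/codec"
)

// Args is a hypothetical request type for a remote "Arith.Add" method.
type Args struct {
	A, B int
}

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:5555") // assumed server address
	if err != nil {
		panic(err)
	}
	h := new(codec.MsgpackHandle)

	// GoRpc speaks the standard net/rpc protocol with msgpack-encoded bodies;
	// codec.MsgpackSpecRpc.ClientCodec(conn, h) could be used instead to talk
	// to a msgpack-rpc spec server.
	client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, h))
	defer client.Close()

	var sum int
	if err := client.Call("Arith.Add", &Args{A: 1, B: 2}, &sum); err != nil {
		panic(err)
	}
	fmt.Println("sum:", sum)
}
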
+type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer +// used by the rpc connection. It accomodates use-cases where the connection +// should be used by rpc and non-rpc functions, e.g. streaming a file after +// sending an rpc response. +type RpcCodecBuffered interface { + BufferedReader() *bufio.Reader + BufferedWriter() *bufio.Writer +} + +// ------------------------------------- + +// rpcCodec defines the struct members and common methods. +type rpcCodec struct { + rwc io.ReadWriteCloser + dec *Decoder + enc *Encoder + bw *bufio.Writer + br *bufio.Reader + mu sync.Mutex + cls bool +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + bw := bufio.NewWriter(conn) + br := bufio.NewReader(conn) + return rpcCodec{ + rwc: conn, + bw: bw, + br: br, + enc: NewEncoder(bw, h), + dec: NewDecoder(br, h), + } +} + +func (c *rpcCodec) BufferedReader() *bufio.Reader { + return c.br +} + +func (c *rpcCodec) BufferedWriter() *bufio.Writer { + return c.bw +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { + if c.cls { + return io.EOF + } + if err = c.enc.Encode(obj1); err != nil { + return + } + if writeObj2 { + if err = c.enc.Encode(obj2); err != nil { + return + } + } + if doFlush && c.bw != nil { + return c.bw.Flush() + } + return +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.cls { + return io.EOF + } + //If nil is passed in, we should still attempt to read content to nowhere. + if obj == nil { + var obj2 interface{} + return c.dec.Decode(&obj2) + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) Close() error { + if c.cls { + return io.EOF + } + c.cls = true + return c.rwc.Close() +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // Must protect for concurrent access as per API + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. 
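
A matching server-side sketch: register an ordinary net/rpc service and serve each accepted connection with a ServerCodec from GoRpc. The Arith service and the listen address are again hypothetical.

package main

import (
	"net"
	"net/rpc"

	"github.com/hashicorp/go-msgpack/codec"
)

// Args and Arith are hypothetical; any net/rpc-compatible service works.
type Args struct {
	A, B int
}

type Arith struct{}

func (Arith) Add(args *Args, reply *int) error {
	*reply = args.A + args.B
	return nil
}

func main() {
	if err := rpc.Register(Arith{}); err != nil {
		panic(err)
	}
	ln, err := net.Listen("tcp", "127.0.0.1:5555")
	if err != nil {
		panic(err)
	}
	h := new(codec.MsgpackHandle)
	for {
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		// Each connection gets its own codec. Per the doc comment above, the
		// value returned by GoRpc.ServerCodec also implements RpcCodecBuffered,
		// so the underlying bufio.Reader/Writer remain reachable if the
		// connection is later reused for non-rpc traffic.
		go rpc.ServeCodec(codec.GoRpc.ServerCodec(conn, h))
	}
}
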
+var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go new file mode 100644 index 0000000..9e4d148 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import "math" + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + h *SimpleHandle + w encWriter + //b [8]byte +} + +func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { +} + +func (e *simpleEncDriver) encodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) encodeFloat32(f float32) { + e.w.writen1(simpleVdFloat32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) encodeFloat64(f float64) { + e.w.writen1(simpleVdFloat64) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) encodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) encodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, uint8(v)) + case v <= math.MaxUint16: + e.w.writen1(bd + 1) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd + 2) + e.w.writeUint32(uint32(v)) + case v <= math.MaxUint64: + e.w.writen1(bd + 3) + e.w.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + switch { + case length == 0: + e.w.writen1(bd) + case length <= math.MaxUint8: + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + case length <= math.MaxUint16: + e.w.writen1(bd + 2) + e.w.writeUint16(uint16(length)) + case int64(length) <= math.MaxUint32: + e.w.writen1(bd + 3) + e.w.writeUint32(uint32(length)) + default: + e.w.writen1(bd + 4) + e.w.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriver) encodeArrayPreamble(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) encodeMapPreamble(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) encodeString(c charEncoding, v string) { + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +func (e *simpleEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *simpleEncDriver) encodeStringBytes(c 
charEncoding, v []byte) { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + h *SimpleHandle + r decReader + bdRead bool + bdType valueType + bd byte + //b [8]byte +} + +func (d *simpleDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *simpleDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.bd { + case simpleVdNil: + d.bdType = valueTypeNil + case simpleVdTrue, simpleVdFalse: + d.bdType = valueTypeBool + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + d.bdType = valueTypeUint + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + d.bdType = valueTypeInt + case simpleVdFloat32, simpleVdFloat64: + d.bdType = valueTypeFloat + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + d.bdType = valueTypeString + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + d.bdType = valueTypeBytes + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + d.bdType = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + d.bdType = valueTypeArray + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) + } + } + return d.bdType +} + +func (d *simpleDecDriver) tryDecodeAsNil() bool { + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { +} + +func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + i = int64(ui) + case simpleVdPosInt + 1: + ui = uint64(d.r.readUint16()) + i = int64(ui) + case simpleVdPosInt + 2: + ui = uint64(d.r.readUint32()) + i = int64(ui) + case simpleVdPosInt + 3: + ui = uint64(d.r.readUint64()) + i = int64(ui) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 1: + ui = uint64(d.r.readUint16()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 2: + ui = uint64(d.r.readUint32()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 3: + ui = uint64(d.r.readUint64()) + i = -(int64(ui)) + neg = true + default: + decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + } + // don't do this check, because callers may only want the unsigned value. 
+ // if ui > math.MaxInt64 { + // decErr("decIntAny: Integer out of range for signed int64: %v", ui) + // } + return +} + +func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case simpleVdFloat32: + f = float64(math.Float32frombits(d.r.readUint32())) + case simpleVdFloat64: + f = math.Float64frombits(d.r.readUint64()) + default: + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + _, i, _ := d.decIntAny() + f = float64(i) + } else { + decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + } + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *simpleDecDriver) decodeBool() (b bool) { + switch d.bd { + case simpleVdTrue: + b = true + case simpleVdFalse: + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) readMapLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) readArrayLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(d.r.readUint16()) + case 3: + ui := uint64(d.r.readUint32()) + checkOverflow(ui, 0, intBitsize) + return int(ui) + case 4: + ui := d.r.readUint64() + checkOverflow(ui, 0, intBitsize) + return int(ui) + } + decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) decodeString() (s string) { + s = string(d.r.readn(d.decLen())) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + if clen := d.decLen(); clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.bd { + case simpleVdNil: + vt = valueTypeNil + case simpleVdFalse: + vt = valueTypeBool + v = false + case simpleVdTrue: + vt = valueTypeBool + v = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + vt = valueTypeUint + ui, _, _ := d.decIntAny() + v = ui + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + vt = valueTypeInt + _, i, _ := d.decIntAny() + v = i + case simpleVdFloat32: + vt = valueTypeFloat + v = d.decodeFloat(true) + case simpleVdFloat64: + vt = valueTypeFloat + v = d.decodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + vt = valueTypeString + v = d.decodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + vt = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Lenght of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. 
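
A minimal round-trip sketch with this handle, reusing the exported NewEncoder/NewDecoder constructors already seen in rpc.go above (assuming they accept an io.Writer/io.Reader, as they are given a bufio.Writer/Reader there); the sample map value is arbitrary.

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	h := new(codec.SimpleHandle)

	in := map[string]int{"a": 1, "b": 2}
	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, h).Encode(in); err != nil {
		panic(err)
	}
	// Per the format description above, the stream starts with a map
	// descriptor byte (simpleVdMap+1, i.e. length encoded in one byte),
	// followed by the length, then the key/value pairs.
	fmt.Printf("encoded %d bytes\n", buf.Len())

	var out map[string]int
	if err := codec.NewDecoder(&buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:1 b:2]
}
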
+type SimpleHandle struct { + BasicHandle +} + +func (h *SimpleHandle) newEncDriver(w encWriter) encDriver { + return &simpleEncDriver{w: w, h: h} +} + +func (h *SimpleHandle) newDecDriver(r decReader) decDriver { + return &simpleDecDriver{r: r, h: h} +} + +func (_ *SimpleHandle) writeExt() bool { + return true +} + +func (h *SimpleHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} + +var _ decDriver = (*simpleDecDriver)(nil) +var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/vendor/github.com/hashicorp/go-msgpack/codec/time.go new file mode 100644 index 0000000..c86d653 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/time.go @@ -0,0 +1,193 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "time" +) + +var ( + timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} +) + +// EncodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. +// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsces: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight savings time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// Descriptor 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. +// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). +// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor bytes, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// 2-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to +// Least significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have\_dst: set to 1 if we set the dst flag. +// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. 
+// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. +func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +func timeLocUTCName(tzint int16) string { + if tzint == 0 { + return "UTC" + } + var tzname = []byte("UTC+00:00") + //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. + //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first + var tzhr, tzmin int16 + if tzint < 0 { + tzname[3] = '-' // (TODO: verify. 
this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml new file mode 100644 index 0000000..304a835 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.x + +branches: + only: + - master + +script: make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 0000000..82b4de9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. 
“You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. 
You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 0000000..b97cd6e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... 
\ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 0000000..ead5830 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,97 @@ +# go-multierror + +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` implements the +[errwrap](https://github.com/hashicorp/errwrap) interface so that it can +be used with that library, as well. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-multierror + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. 
+return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 0000000..775b6e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,41 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierr.Error, they will be flattened +// one level into err. +func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + if e != nil { + err.Errors = append(err.Errors, e.Errors...) + } + default: + if e != nil { + err.Errors = append(err.Errors, e) + } + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 0000000..aab8e9a --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away! + flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 0000000..47f13c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,27 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. +type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. 
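`Append` flattens any `*Error` arguments one level, while `Flatten` walks an arbitrarily nested error tree and merges everything into a single flat `*Error`. A minimal sketch of the difference, using the package's module path (`github.com/hashicorp/go-multierror`):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	// Append only flattens one level, so build a nested *Error by hand to
	// show what Flatten adds.
	inner := &multierror.Error{Errors: []error{
		errors.New("inner-1"),
		errors.New("inner-2"),
	}}
	outer := &multierror.Error{Errors: []error{
		errors.New("outer"),
		inner,
	}}

	flat := multierror.Flatten(outer)
	fmt.Println(len(flat.(*multierror.Error).Errors)) // 3: outer, inner-1, inner-2
}
```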
+func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod new file mode 100644 index 0000000..2534331 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/go-multierror + +require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum new file mode 100644 index 0000000..85b1f8f --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 0000000..89b1422 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,51 @@ +package multierror + +import ( + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. +// It is an implementation of the errwrap.Wrapper interface so that +// multierror.Error can be used with that library. +// +// This method is not safe to be called concurrently and is no different +// than accessing the Errors field directly. It is implemented only to +// satisfy the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + return e.Errors +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 0000000..5c477ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. 
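`Prefix` pairs naturally with `Append` when errors from several phases are merged and need scoping. A hedged sketch follows; `loadConfig` and `startServer` are hypothetical helpers used only for illustration.

```go
func setup() error {
	var result error

	// loadConfig and startServer are hypothetical; any error-returning
	// calls work the same way.
	if err := loadConfig(); err != nil {
		result = multierror.Append(result, multierror.Prefix(err, "config:"))
	}
	if err := startServer(); err != nil {
		result = multierror.Append(result, multierror.Prefix(err, "server:"))
	}

	// Each wrapped error now formats as "config: <original message>" or
	// "server: <original message>".
	return result
}
```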
+func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 0000000..fecb14e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore new file mode 100644 index 0000000..e43b0f9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 0000000..82b4de9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 0000000..fe305ad --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,168 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 4 years. While initially +created for [Packer](https://www.packer.io), it is additionally in use by +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and +[Vault](https://www.vaultproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. + +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. + +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. If the host system +uses [hclog](https://github.com/hashicorp/go-hclog) then the log data +will be structured. If the plugin also uses hclog, logs from the plugin +will be sent to the host hclog and be structured. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. The host process can control what `io.Writer` these +streams go to to prevent this from happening. 
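As a concrete illustration of the stdout/stderr syncing described above, the host opts in by setting the `SyncStdout`/`SyncStderr` writers on `ClientConfig` (the fields appear in `client.go` later in this diff). A hedged snippet; `handshake` and `pluginMap` are placeholders assumed to be defined elsewhere:

```go
client := plugin.NewClient(&plugin.ClientConfig{
	HandshakeConfig: handshake,                   // placeholder
	Plugins:         pluginMap,                   // placeholder
	Cmd:             exec.Command("./my-plugin"), // example plugin binary
	SyncStdout:      os.Stdout,                   // mirror plugin stdout here
	SyncStderr:      os.Stderr,                   // mirror plugin stderr here
})
defer client.Kill()
```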
+ +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. + +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +**Cryptographically Secure Plugins.** Plugins can be verified with an expected +checksum and RPC communications can be configured to use TLS. The host process +must be properly secured to protect this configuration. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. + + * Plugins are very easy to write: just write a Go application and `go build`. + Or use any other language to write a gRPC server with a tiny amount of + boilerplate to support go-plugin. + + * Plugins are very easy to install: just put the binary in a location where + the host will find it (depends on the host but this library also provides + helpers), and the plugin host handles the rest. + + * Plugins can be relatively secure: The plugin only has access to the + interfaces and args given to it, not to the entire memory space of the + process. Additionally, go-plugin can communicate with the plugin over + TLS. + +## Usage + +To use the plugin system, you must take the following steps. These are +high-level steps that must be done. Examples are available in the +`examples/` directory. + + 1. Choose the interface(s) you want to expose for plugins. + + 2. For each interface, implement an implementation of that interface + that communicates over a `net/rpc` connection or over a + [gRPC](http://www.grpc.io) connection or both. You'll have to implement + both a client and server implementation. + + 3. Create a `Plugin` implementation that knows how to create the RPC + client/server for a given plugin type. + + 4. Plugin authors call `plugin.Serve` to serve a plugin from the + `main` function. + + 5. Plugin users use `plugin.Client` to launch a subprocess and request + an interface implementation over RPC. + +That's it! In practice, step 2 is the most tedious and time consuming step. +Even so, it isn't very difficult and you can see examples in the `examples/` +directory as well as throughout our various open source projects. + +For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). + +## Roadmap + +Our plugin system is constantly evolving. As we use the plugin system for +new projects or for new features in existing projects, we constantly find +improvements we can make. + +At this point in time, the roadmap for the plugin system is: + +**Semantic Versioning.** Plugins will be able to implement a semantic version. 
+This plugin system will give host processes a system for constraining +versions. This is in addition to the protocol versioning already present +which is more for larger underlying changes. + +**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) +to support automatic download + install of plugins. Paired with cryptographically +secure plugins (above), we can make this a safe operation for an amazing +user experience. + +## What About Shared Libraries? + +When we started using plugins (late 2012, early 2013), plugins over RPC +were the only option since Go didn't support dynamic library loading. Today, +Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with +a number of limitations. Since 2012, our plugin system has stabilized +from tens of millions of users using it, and has many benefits we've come to +value greatly. + +For example, we use this plugin system in +[Vault](https://www.vaultproject.io) where dynamic library loading is +not acceptable for security reasons. That is an extreme +example, but we believe our library system has more upsides than downsides +over dynamic library loading and since we've had it built and tested for years, +we'll continue to use it. + +Shared libraries have one major advantage over our system which is much +higher performance. In real world scenarios across our various tools, +we've never required any more performance out of our plugin system and it +has seen very high throughput, so this isn't a concern for us at the moment. diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 0000000..42de0fc --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,973 @@ +package plugin + +import ( + "bufio" + "context" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" + + hclog "github.com/hashicorp/go-hclog" +) + +// If this is 1, then we've called CleanupClients. This can be used +// by plugin RPC implementations to change error behavior since you +// can expected network connection errors at this point. This should be +// read by using sync/atomic. +var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling Cleanup +var managedClients = make([]*Client, 0, 5) +var managedClientsLock sync.Mutex + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") + + // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match + // the one provided in the SecureConfig. + ErrChecksumsDoNotMatch = errors.New("checksums did not match") + + // ErrSecureNoChecksum is returned when an empty checksum is provided to the + // SecureConfig. + ErrSecureConfigNoChecksum = errors.New("no checksum provided") + + // ErrSecureNoHash is returned when a nil Hash object is provided to the + // SecureConfig. + ErrSecureConfigNoHash = errors.New("no hash implementation provided") + + // ErrSecureConfigAndReattach is returned when both Reattach and + // SecureConfig are set. + ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") +) + +// Client handles the lifecycle of a plugin application. 
It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// cal `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + l sync.Mutex + address net.Addr + process *os.Process + client ClientProtocol + protocol Protocol + logger hclog.Logger + doneCtx context.Context + ctxCancel context.CancelFunc + negotiatedVersion int + + // clientWaitGroup is used to manage the lifecycle of the plugin management + // goroutines. + clientWaitGroup sync.WaitGroup + + // processKilled is used for testing only, to flag when the process was + // forcefully killed. + processKilled bool +} + +// NegotiatedVersion returns the protocol version negotiated with the server. +// This is only valid after Start() is called. +func (c *Client) NegotiatedVersion() int { + return c.negotiatedVersion +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // One of the following must be set, but not both. + // + // Cmd is the unstarted subprocess for starting the plugin. If this is + // set, then the Client starts the plugin process on its own and connects + // to it. + // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // SecureConfig is configuration for verifying the integrity of the + // executable. It can not be used with Reattach. + SecureConfig *SecureConfig + + // TLSConfig is used to enable TLS on the RPC client. + TLSConfig *tls.Config + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. + // This isn't the output of synced stderr. + Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. 
Care should be taken to + // avoid races here. If these are nil, then this will automatically be + // hooked up to os.Stdin, Stdout, and Stderr, respectively. + // + // If the default values (nil) are used, then this package will not + // sync any of these streams. + SyncStdout io.Writer + SyncStderr io.Writer + + // AllowedProtocols is a list of allowed protocols. If this isn't set, + // then only netrpc is allowed. This is so that older go-plugin systems + // can show friendly errors if they see a plugin with an unknown + // protocol. + // + // By setting this, you can cause an error immediately on plugin start + // if an unsupported protocol is used with a good error message. + // + // If this isn't set at all (nil value), then only net/rpc is accepted. + // This is done for legacy reasons. You must explicitly opt-in to + // new protocols. + AllowedProtocols []Protocol + + // Logger is the logger that the client will used. If none is provided, + // it will default to hclog's default logger. + Logger hclog.Logger + + // AutoMTLS has the client and server automatically negotiate mTLS for + // transport authentication. This ensures that only the original client will + // be allowed to connect to the server, and all other connections will be + // rejected. The client will also refuse to connect to any server that isn't + // the original instance started by the client. + // + // In this mode of operation, the client generates a one-time use tls + // certificate, sends the public x.509 certificate to the new server, and + // the server generates a one-time use tls certificate, and sends the public + // x.509 certificate back to the client. These are used to authenticate all + // rpc connections between the client and server. + // + // Setting AutoMTLS to true implies that the server must support the + // protocol, and correctly negotiate the tls certificates, or a connection + // failure will result. + // + // The client should not set TLSConfig, nor should the server set a + // TLSProvider, because AutoMTLS implies that a new certificate and tls + // configuration will be generated at startup. + // + // You cannot Reattach to a server with this option enabled. + AutoMTLS bool +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Protocol Protocol + Addr net.Addr + Pid int +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying the checksum is +// expected. Hash is used to specify the hashing method to use when checksumming +// the file. The configuration is verified by the client by calling the +// SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// can not be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig. 
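+//
+// An illustrative sketch (not part of the library); the plugin path and the
+// source of the expected digest are hypothetical:
+//
+//	sum, _ := hex.DecodeString(expectedSHA256Hex) // trusted, hex-encoded digest
+//	sc := &SecureConfig{Checksum: sum, Hash: sha256.New()}
+//	ok, err := sc.Check("./my-plugin")
+//
+// More commonly the same SecureConfig is passed to NewClient via
+// ClientConfig.SecureConfig, in which case this check runs automatically
+// before the subprocess is started.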
+func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + wg.Wait() +} + +// Creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with NewManagedClient) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. +func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + c = &Client{ + config: config, + logger: config.Logger, + } + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns the protocol client for this connection. +// +// Subsequent calls to this will return the same client. +func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) + + case ProtocolGRPC: + c.client, err = newGRPCClient(c.doneCtx, c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) + } + + if err != nil { + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// killed is used in tests to check if a process failed to exit gracefully, and +// needed to be killed. 
+func (c *Client) killed() bool { + c.l.Lock() + defer c.l.Unlock() + return c.processKilled +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + process := c.process + addr := c.address + c.l.Unlock() + + // If there is no process, there is nothing to kill. + if process == nil { + return + } + + defer func() { + // Wait for the all client goroutines to finish. + c.clientWaitGroup.Wait() + + // Make sure there is no reference to the old process after it has been + // killed. + c.l.Lock() + c.process = nil + c.l.Unlock() + }() + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. + graceful := false + if addr != nil { + // Close the client to cleanly exit the process. + client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. We're going to force + // kill in a moment anyways. + c.logger.Warn("error closing client during Kill", "err", err) + } + } else { + c.logger.Error("client", "error", err) + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-c.doneCtx.Done(): + c.logger.Debug("plugin exited") + return + case <-time.After(2 * time.Second): + } + } + + // If graceful exiting failed, just kill it + c.logger.Warn("plugin failed to exit gracefully") + process.Kill() + + c.l.Lock() + c.processKilled = true + c.l.Unlock() +} + +// Starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stack here. + { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } + } + + if c.config.Reattach != nil { + return c.reattach() + } + + if c.config.VersionedPlugins == nil { + c.config.VersionedPlugins = make(map[int]PluginSet) + } + + // handle all plugins as versioned, using the handshake config as the default. + version := int(c.config.ProtocolVersion) + + // Make sure we're not overwriting a real version 0. 
If ProtocolVersion was + // non-zero, then we have to just assume the user made sure that + // VersionedPlugins doesn't conflict. + if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { + c.config.VersionedPlugins[version] = c.config.Plugins + } + + var versionStrings []string + for v := range c.config.VersionedPlugins { + versionStrings = append(versionStrings, strconv.Itoa(v)) + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), + } + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + + cmdStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + cmdStderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + // Setup a temporary certificate for client/server mtls, and send the public + // certificate to the plugin. + if c.config.AutoMTLS { + c.logger.Info("configuring client automatic mTLS") + certPEM, keyPEM, err := generateCert() + if err != nil { + c.logger.Error("failed to generate client certificate", "error", err) + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + c.logger.Error("failed to parse client certificate", "error", err) + return nil, err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) + + c.config.TLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: "localhost", + } + } + + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid) + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + go func() { + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + defer c.clientWaitGroup.Done() + + // get the cmd info early, since the process information will be removed + // in Kill. + pid := c.process.Pid + path := cmd.Path + + // Wait for the command to end. + err := cmd.Wait() + + debugMsgArgs := []interface{}{ + "path", path, + "pid", pid, + } + if err != nil { + debugMsgArgs = append(debugMsgArgs, + []interface{}{"error", err.Error()}...) + } + + // Log and make sure to flush the logs write away + c.logger.Debug("plugin process exited", debugMsgArgs...) 
+ os.Stderr.Sync() + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start goroutine that logs the stderr + c.clientWaitGroup.Add(1) + // logStderr calls Done() + go c.logStderr(cmdStderr) + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan string) + c.clientWaitGroup.Add(1) + go func() { + defer c.clientWaitGroup.Done() + defer close(linesCh) + + scanner := bufio.NewScanner(cmdStdout) + for scanner.Scan() { + linesCh <- scanner.Text() + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is a pipe. + // The scanner goroutine above will close this, but track it with a wait + // group for completeness. + c.clientWaitGroup.Add(1) + defer func() { + go func() { + defer c.clientWaitGroup.Done() + for range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + c.logger.Debug("waiting for RPC address", "path", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-c.doneCtx.Done(): + err = errors.New("plugin exited before we could connect") + case line := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line = strings.TrimSpace(line) + parts := strings.SplitN(line, "|", 6) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. + { + var coreProtocol int64 + coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if int(coreProtocol) != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Core version: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) + if err != nil { + return addr, err + } + + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return addr, err + } + + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. 
+ if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } + } + + c.address = addr + return +} + +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA for the client TLSConfig. +func (c *Client) loadServerCert(cert string) error { + certPool := x509.NewCertPool() + + asn1, err := base64.RawStdEncoding.DecodeString(cert) + if err != nil { + return err + } + + x509Cert, err := x509.ParseCertificate([]byte(asn1)) + if err != nil { + return err + } + + certPool.AddCert(x509Cert) + + c.config.TLSConfig.RootCAs = certPool + return nil +} + +func (c *Client) reattach() (net.Addr, error) { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. + conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + // Goroutine to mark exit status + go func(pid int) { + defer c.clientWaitGroup.Done() + + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + c.logger.Debug("reattached plugin process exited") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } + + return c.address, nil +} + +// checkProtoVersion returns the negotiated version and PluginSet. +// This returns an error if the server returned an incompatible protocol +// version, or an invalid handshake response. +func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { + serverVersion, err := strconv.Atoi(protoVersion) + if err != nil { + return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) + } + + // record these for the error message + var clientVersions []int + + // all versions, including the legacy ProtocolVersion have been added to + // the versions set + for version, plugins := range c.config.VersionedPlugins { + clientVersions = append(clientVersions, version) + + if serverVersion != version { + continue + } + return version, plugins, nil + } + + return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. 
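+//
+// An illustrative sketch (not part of the library) of handing a running
+// plugin over to a new host process; the `client` variable and the way the
+// config is persisted are hypothetical:
+//
+//	rc := client.ReattachConfig()
+//	// ... persist rc (protocol, address, pid), then start a new host ...
+//	newHost := NewClient(&ClientConfig{
+//		Reattach: rc,
+//		// other fields (e.g. Plugins) omitted for brevity
+//	})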
+func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +// Protocol returns the protocol of server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are surpressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. +func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. +func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + conn, err := netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil +} + +func (c *Client) logStderr(r io.Reader) { + defer c.clientWaitGroup.Done() + + scanner := bufio.NewScanner(r) + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + for scanner.Scan() { + line := scanner.Text() + c.config.Stderr.Write([]byte(line + "\n")) + line = strings.TrimRightFunc(line, unicode.IsSpace) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + l.Debug(line) + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + } + } + } + + if err := scanner.Err(); err != nil { + l.Error("reading plugin stderr", "error", err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 0000000..d22c566 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. 
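+//
+// An illustrative sketch (not part of the library); the glob pattern and
+// directory are hypothetical:
+//
+//	paths, err := Discover("*-plugin", "/opt/app/plugins")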
+// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 0000000..22a7baa --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod new file mode 100644 index 0000000..f3ddf44 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -0,0 +1,17 @@ +module github.com/hashicorp/go-plugin + +require ( + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/protobuf v1.2.0 + github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 + github.com/oklog/run v1.0.0 + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect + golang.org/x/text v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect + google.golang.org/grpc v1.14.0 +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum new file mode 100644 index 0000000..21b14e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -0,0 +1,31 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= 
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 0000000..daf142d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,457 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. +type streamer interface { + Send(*plugin.ConnInfo) error + Recv() (*plugin.ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *plugin.ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the plugin. Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. 
+ o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Proccess send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client. +func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the client. Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client plugin.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: plugin.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. 
+func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. 
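+//
+// An illustrative sketch (not part of the library) of the pattern; the
+// RegisterHelperServer/helperServer names stand in for a user-defined gRPC
+// service and are hypothetical:
+//
+//	id := broker.NextId()
+//	go broker.AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server {
+//		s := grpc.NewServer(opts...)
+//		RegisterHelperServer(s, &helperServer{})
+//		return s
+//	})
+//	// The id is then sent to the other side over the main plugin RPC, which
+//	// can call broker.Dial(id) to reach the same service.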
+func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *plugin.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. 
+ select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 0000000..294518e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,111 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + if err != nil { + return nil, err + } + + // Start the broker. + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. 
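+//
+// An illustrative sketch (not part of the library): a host can use Ping as a
+// cheap liveness probe on the protocol client returned by Client.Client();
+// the `client` variable below is a hypothetical *Client:
+//
+//	proto, err := client.Client()
+//	if err == nil && proto.Ping() != nil {
+//		client.Kill() // plugin is unreachable; restart it if desired
+//	}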
+func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 0000000..1a8a8e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/plugin" +) + +// GRPCControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. It first will attempt a graceful stop, then a +// full stop on the server. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { + resp := &plugin.Empty{} + + // TODO: figure out why GracefullStop doesn't work. + s.server.Stop() + return resp, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 0000000..d3dbf1c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,142 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. +// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. + TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. + DoneCh chan struct{} + + // Stdout/StderrLis are the readers for stdout/stderr that will be copied + // to the stdout/stderr connection that is output. + Stdout io.Reader + Stderr io.Reader + + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + + logger hclog.Logger +} + +// ServerProtocol impl. 
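+//
+// An illustrative sketch (not part of the library; ServeConfig field names
+// are assumed from the public Serve API): plugin authors normally do not
+// construct a GRPCServer directly. It is created by Serve when a gRPC server
+// factory such as DefaultGRPCServer is supplied:
+//
+//	Serve(&ServeConfig{
+//		HandshakeConfig: handshake, // hypothetical, shared with the host
+//		Plugins:         pluginMap, // hypothetical map[string]Plugin
+//		GRPCServer:      DefaultGRPCServer,
+//	})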
+func (s *GRPCServer) Init() error { + // Create our server + var opts []grpc.ServerOption + if s.TLS != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) + } + s.server = s.Server(opts) + + // Register the health service + healthCheck := health.NewServer() + healthCheck.SetServingStatus( + GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + + // Register the broker service + brokerServer := newGRPCBrokerServer() + plugin.RegisterGRPCBrokerServer(s.server, brokerServer) + s.broker = newGRPCBroker(brokerServer, s.TLS) + go s.broker.Run() + + // Register the controller + controllerServer := &grpcControllerServer{ + server: s, + } + plugin.RegisterGRPCControllerServer(s.server, controllerServer) + + // Register all our plugins onto the gRPC server. + for k, raw := range s.Plugins { + p, ok := raw.(GRPCPlugin) + if !ok { + return fmt.Errorf("%q is not a GRPC-compatible plugin", k) + } + + if err := p.GRPCServer(s.broker, s.server); err != nil { + return fmt.Errorf("error registering %q: %s", k, err) + } + } + + return nil +} + +// Stop calls Stop on the underlying grpc.Server +func (s *GRPCServer) Stop() { + s.server.Stop() +} + +// GracefulStop calls GracefulStop on the underlying grpc.Server +func (s *GRPCServer) GracefulStop() { + s.server.GracefulStop() +} + +// Config is the GRPCServerConfig encoded as JSON then base64. +func (s *GRPCServer) Config() string { + // Create a buffer that will contain our final contents + var buf bytes.Buffer + + // Wrap the base64 encoding with JSON encoding. + if err := json.NewEncoder(&buf).Encode(s.config); err != nil { + // We panic since ths shouldn't happen under any scenario. We + // carefully control the structure being encoded here and it should + // always be successful. + panic(err) + } + + return buf.String() +} + +func (s *GRPCServer) Serve(lis net.Listener) { + defer close(s.DoneCh) + err := s.server.Serve(lis) + if err != nil { + s.logger.Error("grpc server", "error", err) + } +} + +// GRPCServerConfig is the extra configuration passed along for consumers +// to facilitate using GRPC plugins. +type GRPCServerConfig struct { + StdoutAddr string `json:"stdout_addr"` + StderrAddr string `json:"stderr_addr"` +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go new file mode 100644 index 0000000..aa2fdc8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:. + +package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go new file mode 100644 index 0000000..b6850aa --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_broker.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_802e9beed3ec3b28, []int{0} +} + +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (m *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(m, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } + +var fileDescriptor_802e9beed3ec3b28 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto new file mode 100644 index 0000000..3fa79e8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} + + diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 0000000..38b4204 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc_controller.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_23c2c7e42feab570, []int{0} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "plugin.Empty") +} + +func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } + +var fileDescriptor_23c2c7e42feab570 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/plugin.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto new file mode 100644 index 0000000..345d0a1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message Empty { +} + +// The GRPCController is responsible for telling the plugin server to shutdown. +service GRPCController { + rpc Shutdown(Empty) returns (Empty); +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 0000000..2996c14 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for hclog consumption. 
+func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input string) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal([]byte(input), &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. + for k, v := range raw { + entry.KVPairs = append(entry.KVPairs, &logEntryKV{ + Key: k, + Value: v, + }) + } + + return entry, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go new file mode 100644 index 0000000..8895524 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// generateCert generates a temporary certificate for plugin authentication. The +// certificate and private key are returns in PEM format. +func generateCert() (cert []byte, privateKey []byte, err error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + host := "localhost" + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"HashiCorp"}, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, err + } + + var certOut bytes.Buffer + if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + return nil, nil, err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + + var keyOut bytes.Buffer + if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return nil, nil, err + } + + cert = certOut.Bytes() + privateKey = keyOut.Bytes() + + return cert, privateKey, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 0000000..01c45ad --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,204 @@ +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is 
responsible for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. +func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. 
+func (m *MuxBroker) Run() {
+	for {
+		stream, err := m.session.AcceptStream()
+		if err != nil {
+			// Once we receive an error, just exit
+			break
+		}
+
+		// Read the stream ID from the stream
+		var id uint32
+		if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
+			stream.Close()
+			continue
+		}
+
+		// Initialize the waiter
+		p := m.getStream(id)
+		select {
+		case p.ch <- stream:
+		default:
+		}
+
+		// Wait for a timeout
+		go m.timeoutWait(id, p)
+	}
+}
+
+func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
+	m.Lock()
+	defer m.Unlock()
+
+	p, ok := m.streams[id]
+	if ok {
+		return p
+	}
+
+	m.streams[id] = &muxBrokerPending{
+		ch:     make(chan net.Conn, 1),
+		doneCh: make(chan struct{}),
+	}
+	return m.streams[id]
+}
+
+func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
+	// Wait for the stream to either be picked up and connected, or
+	// for a timeout.
+	timeout := false
+	select {
+	case <-p.doneCh:
+	case <-time.After(5 * time.Second):
+		timeout = true
+	}
+
+	m.Lock()
+	defer m.Unlock()
+
+	// Delete the stream so no one else can grab it
+	delete(m.streams, id)
+
+	// If we timed out, then check if we have a channel in the buffer,
+	// and if so, close it.
+	if timeout {
+		select {
+		case s := <-p.ch:
+			s.Close()
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
new file mode 100644
index 0000000..79d9674
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -0,0 +1,58 @@
+// The plugin package exposes functions and helpers for communicating to
+// plugins which are implemented as standalone binary applications.
+//
+// plugin.Client fully manages the lifecycle of executing the application,
+// connecting to it, and returning the RPC client for dispensing plugins.
+//
+// plugin.Serve fully manages listeners to expose an RPC server from a binary
+// that plugin.Client can connect to.
+package plugin
+
+import (
+	"context"
+	"errors"
+	"net/rpc"
+
+	"google.golang.org/grpc"
+)
+
+// Plugin is the interface that is implemented to serve/connect to an
+// interface implementation.
+type Plugin interface {
+	// Server should return the RPC server compatible struct to serve
+	// the methods that the Client calls over net/rpc.
+	Server(*MuxBroker) (interface{}, error)
+
+	// Client returns an interface implementation for the plugin you're
+	// serving that communicates to the server end of the plugin.
+	Client(*MuxBroker, *rpc.Client) (interface{}, error)
+}
+
+// GRPCPlugin is the interface that is implemented to serve/connect to
+// a plugin over gRPC.
+type GRPCPlugin interface {
+	// GRPCServer should register this plugin for serving with the
+	// given GRPCServer. Unlike Plugin.Server, this is only called once
+	// since gRPC plugins serve singletons.
+	GRPCServer(*GRPCBroker, *grpc.Server) error
+
+	// GRPCClient should return the interface implementation for the plugin
+	// you're serving via gRPC. The provided context will be canceled by
+	// go-plugin in the event of the plugin process exiting.
+	GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error)
+}
+
+// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
+// Server and Client functions. This will effectively disable support for
+// net/rpc based plugins.
+//
+// This struct can be embedded in your struct.
+type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 0000000..88c999a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 0000000..70ba546 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 0000000..9f7b018 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 0000000..0cfc19e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. 
This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 0000000..f30a4b1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,170 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. +func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. 
+func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. + var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. +func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 0000000..5bb18dd --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,197 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. +func (s *RPCServer) Serve(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Printf("[ERR] plugin: plugin server: %s", err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. 
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
+	// First create the yamux server to wrap this connection
+	mux, err := yamux.Server(conn, nil)
+	if err != nil {
+		conn.Close()
+		log.Printf("[ERR] plugin: error creating yamux server: %s", err)
+		return
+	}
+
+	// Accept the control connection
+	control, err := mux.Accept()
+	if err != nil {
+		mux.Close()
+		if err != io.EOF {
+			log.Printf("[ERR] plugin: error accepting control connection: %s", err)
+		}
+
+		return
+	}
+
+	// Connect the stdstreams (in, out, err)
+	stdstream := make([]net.Conn, 2)
+	for i, _ := range stdstream {
+		stdstream[i], err = mux.Accept()
+		if err != nil {
+			mux.Close()
+			log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
+			return
+		}
+	}
+
+	// Copy std streams out to the proper place
+	go copyStream("stdout", stdstream[0], s.Stdout)
+	go copyStream("stderr", stdstream[1], s.Stderr)
+
+	// Create the broker and start it up
+	broker := newMuxBroker(mux)
+	go broker.Run()
+
+	// Use the control connection to build the dispenser and serve the
+	// connection.
+	server := rpc.NewServer()
+	server.RegisterName("Control", &controlServer{
+		server: s,
+	})
+	server.RegisterName("Dispenser", &dispenseServer{
+		broker:  broker,
+		plugins: s.Plugins,
+	})
+	server.ServeConn(control)
+}
+
+// done is called internally by the control server to trigger the
+// doneCh to close which is listened to by the main process to cleanly
+// exit.
+func (s *RPCServer) done() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if s.DoneCh != nil {
+		close(s.DoneCh)
+		s.DoneCh = nil
+	}
+}
+
+// controlServer implements the "Control" RPC service that the client uses
+// to check liveness (Ping) and to ask the plugin server to shut down (Quit).
+type controlServer struct {
+	server *RPCServer
+}
+
+// Ping can be called to verify the connection (and likely the binary)
+// is still alive to a plugin.
+func (c *controlServer) Ping(
+	null bool, response *struct{}) error {
+	*response = struct{}{}
+	return nil
+}
+
+func (c *controlServer) Quit(
+	null bool, response *struct{}) error {
+	// End the server
+	c.server.done()
+
+	// Always return true
+	*response = struct{}{}
+
+	return nil
+}
+
+// dispenseServer dispenses various interface implementations for Terraform.
+type dispenseServer struct {
+	broker  *MuxBroker
+	plugins map[string]Plugin
+}
+
+func (d *dispenseServer) Dispense(
+	name string, response *uint32) error {
+	// Find the function to create this implementation
+	p, ok := d.plugins[name]
+	if !ok {
+		return fmt.Errorf("unknown plugin type: %s", name)
+	}
+
+	// Create the implementation first so we know if there is an error.
+	impl, err := p.Server(d.broker)
+	if err != nil {
+		// We turn the error into an errors error so that it works across RPC
+		return errors.New(err.Error())
+	}
+
+	// Reserve an ID for our implementation
+	id := d.broker.NextId()
+	*response = id
+
+	// Run the rest in a goroutine since it can only happen once this RPC
+	// call returns. We wait for a connection for the plugin implementation
+	// and serve it.
+ go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 0000000..fc9f05a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,432 @@ +package plugin + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/signal" + "runtime" + "sort" + "strconv" + "strings" + "sync/atomic" + + "github.com/hashicorp/go-hclog" + + "google.golang.org/grpc" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + + // Plugins are the plugins that are served. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. 
This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger +} + +// protocolVersion determines the protocol version and plugin set to be used by +// the server. In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. +func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. + sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the version to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// errors will be outputted to os.Stderr. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. 
Please notify the plugin author and report\n"+ + "this as a bug.\n") + os.Exit(1) + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + os.Exit(1) + } + + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + + // Logging goes to the original stderr + log.SetOutput(os.Stderr) + + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + logger.Error("plugin init error", "error", err) + return + } + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. + if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate client certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse client certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Build the server type + var server ServerProtocol + switch protoType { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. 
+ if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: pluginSet, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: pluginSet, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + logger: logger, + } + + default: + panic("unknown server protocol: " + protoType) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + + // Output the address and service name to stdout so that the client can bring it up. + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) + } + }() + + // Set our new out, err + os.Stdout = stdout_w + os.Stderr = stderr_w + + // Accept connections and wait for completion + go server.Serve(listener) + <-doneCh +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32) + if err != nil { + return nil, err + } + + maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32) + if err != nil { + return nil, err + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. 
+type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 0000000..033079e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 0000000..1d547aa --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 0000000..2cf2c26 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,180 @@ +package plugin + +import ( + "bytes" + "context" + "io" + "net" + "net/rpc" + + "github.com/mitchellh/go-testing-interface" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" +) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. 
+ l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. +func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. 
+func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: hclog.Default(), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: plugin.NewGRPCControllerClient(conn), + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml new file mode 100644 index 0000000..80e1de4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.6 + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE new file mode 100644 index 0000000..e87a115 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile new file mode 100644 index 0000000..c3989e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile @@ -0,0 +1,8 @@ +TEST?=./... + +test: + go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4 + go vet $(TEST) + go test $(TEST) -race + +.PHONY: test diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md new file mode 100644 index 0000000..f5abffc --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/README.md @@ -0,0 +1,43 @@ +# rootcerts + +Functions for loading root certificates for TLS connections. + +----- + +Go's standard library `crypto/tls` provides a common mechanism for configuring +TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool +of certificates for the client to use as a trust store when verifying server +certificates. + +This library contains utility functions for loading certificates destined for +that field, as well as one other important thing: + +When the `RootCAs` field is `nil`, the standard library attempts to load the +host's root CA set. This behavior is OS-specific, and the Darwin +implementation contains [a bug that prevents trusted certificates from the +System and Login keychains from being loaded][1]. This library contains +Darwin-specific behavior that works around that bug. + +[1]: https://github.com/golang/go/issues/14514 + +## Example Usage + +Here's a snippet demonstrating how this library is meant to be used: + +```go +func httpClient() (*http.Client, error) + tlsConfig := &tls.Config{} + err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ + CAFile: os.Getenv("MYAPP_CAFILE"), + CAPath: os.Getenv("MYAPP_CAPATH"), + }) + if err != nil { + return nil, err + } + c := cleanhttp.DefaultClient() + t := cleanhttp.DefaultTransport() + t.TLSClientConfig = tlsConfig + c.Transport = t + return c, nil +} +``` diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go new file mode 100644 index 0000000..b55cc62 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go @@ -0,0 +1,9 @@ +// Package rootcerts contains functions to aid in loading CA certificates for +// TLS connections. +// +// In addition, its default behavior on Darwin works around an open issue [1] +// in Go's crypto/x509 that prevents certicates from being loaded from the +// System or Login keychains. +// +// [1] https://github.com/golang/go/issues/14514 +package rootcerts diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go new file mode 100644 index 0000000..aeb30ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go @@ -0,0 +1,103 @@ +package rootcerts + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// Config determines where LoadCACerts will load certificates from. When both +// CAFile and CAPath are blank, this library's functions will either load +// system roots explicitly and return them, or set the CertPool to nil to allow +// Go's standard library to load system certs. 
+type Config struct { + // CAFile is a path to a PEM-encoded certificate file or bundle. Takes + // precedence over CAPath. + CAFile string + + // CAPath is a path to a directory populated with PEM-encoded certificates. + CAPath string +} + +// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the +// Config specified. +func ConfigureTLS(t *tls.Config, c *Config) error { + if t == nil { + return nil + } + pool, err := LoadCACerts(c) + if err != nil { + return err + } + t.RootCAs = pool + return nil +} + +// LoadCACerts loads a CertPool based on the Config specified. +func LoadCACerts(c *Config) (*x509.CertPool, error) { + if c == nil { + c = &Config{} + } + if c.CAFile != "" { + return LoadCAFile(c.CAFile) + } + if c.CAPath != "" { + return LoadCAPath(c.CAPath) + } + + return LoadSystemCAs() +} + +// LoadCAFile loads a single PEM-encoded file from the path specified. +func LoadCAFile(caFile string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("Error loading CA File: %s", err) + } + + ok := pool.AppendCertsFromPEM(pem) + if !ok { + return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile) + } + + return pool, nil +} + +// LoadCAPath walks the provided path and loads all certificates encounted into +// a pool. +func LoadCAPath(caPath string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + pem, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("Error loading file from CAPath: %s", err) + } + + ok := pool.AppendCertsFromPEM(pem) + if !ok { + return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path) + } + + return nil + } + + err := filepath.Walk(caPath, walkFn) + if err != nil { + return nil, err + } + + return pool, nil +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go new file mode 100644 index 0000000..66b1472 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go @@ -0,0 +1,12 @@ +// +build !darwin + +package rootcerts + +import "crypto/x509" + +// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that +// default behavior of standard TLS config libraries is triggered, which is to +// load system certs. 
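Putting the pieces together: LoadCACerts prefers CAFile, then CAPath, and only then falls back to LoadSystemCAs (which, as noted above, returns nil off Darwin so the standard library loads the host's roots). A minimal, hypothetical sketch of a caller wiring that into a tls.Config, reusing the MYAPP_CAFILE/MYAPP_CAPATH environment variables shown in the README:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/go-rootcerts"
)

func main() {
	// CAFile takes precedence over CAPath; with both blank, LoadCACerts
	// falls back to LoadSystemCAs, so RootCAs may legitimately stay nil
	// and the standard library will load the host's roots.
	cfg := &rootcerts.Config{
		CAFile: os.Getenv("MYAPP_CAFILE"), // optional PEM bundle (hypothetical env var)
		CAPath: os.Getenv("MYAPP_CAPATH"), // optional directory of PEM files
	}

	tlsConfig := &tls.Config{}
	if err := rootcerts.ConfigureTLS(tlsConfig, cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println("explicit RootCAs set:", tlsConfig.RootCAs != nil)
}
```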
+func LoadSystemCAs() (*x509.CertPool, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go new file mode 100644 index 0000000..a9a0406 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go @@ -0,0 +1,48 @@ +package rootcerts + +import ( + "crypto/x509" + "os/exec" + "path" + + "github.com/mitchellh/go-homedir" +) + +// LoadSystemCAs has special behavior on Darwin systems to work around +func LoadSystemCAs() (*x509.CertPool, error) { + pool := x509.NewCertPool() + + for _, keychain := range certKeychains() { + err := addCertsFromKeychain(pool, keychain) + if err != nil { + return nil, err + } + } + + return pool, nil +} + +func addCertsFromKeychain(pool *x509.CertPool, keychain string) error { + cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain) + data, err := cmd.Output() + if err != nil { + return err + } + + pool.AppendCertsFromPEM(data) + + return nil +} + +func certKeychains() []string { + keychains := []string{ + "/System/Library/Keychains/SystemRootCertificates.keychain", + "/Library/Keychains/System.keychain", + } + home, err := homedir.Dir() + if err == nil { + loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain") + keychains = append(keychains, loginKeychain) + } + return keychains +} diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml new file mode 100644 index 0000000..542ca8b --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.9 + - "1.10" + +script: + - go test diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. 
“Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 0000000..6f3a15c --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,65 @@ +# Versioning Library for Go +[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 0000000..d055759 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,204 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + check *Version + original string +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. 
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod new file mode 100644 index 0000000..f528555 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 0000000..4d1e6e2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,347 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var versionRegexp *regexp.Regexp + +// The raw regular expression string used for testing the validity +// of a version. +const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + +// Version represents a single version. 
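To make the pessimistic operator concrete, here is a small, self-contained sketch (an illustrative example program, not part of this package) showing how "~> 1.2.3" accepts patch-level growth but rejects a minor bump, using only the NewConstraint, NewVersion, Must, and Check calls defined above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-version"
)

func main() {
	// "~> 1.2.3" keeps every segment but the last fixed and lets the
	// last one grow: it admits 1.2.3 and 1.2.9 but rejects 1.3.0.
	c, err := version.NewConstraint("~> 1.2.3")
	if err != nil {
		log.Fatal(err)
	}
	for _, raw := range []string{"1.2.3", "1.2.9", "1.3.0"} {
		v := version.Must(version.NewVersion(raw))
		fmt.Printf("%s satisfies %s: %v\n", v, c, c.Check(v))
	}
}
```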
+type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + matches := versionRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + si := 0 + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = int64(val) + si++ + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: si, + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// or GreaterThan methods. +func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + // If the segments are the same, we must compare on prerelease info + if reflect.DeepEqual(segmentsSelf, segmentsOther) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } + // Compare the segments + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + //if not, it means that Self has to be greater than Other + return 1 + } + break + } + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } + // Otherwis, rhs was > lhs, they're not equal + return 1 + } + + // if we got this far, they're equal + return 0 +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + 
return true +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + if otherNumeric { + return -1 + } + return 1 + } + + if preOther == "" { + if selfNumeric { + return 1 + } + return -1 + } + + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same pre release! + if v == other { + return 0 + } + + // split both pre releases for analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop for parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if parts are equals, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Equal tests if two versions are equal. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice of ints. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. 
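As a quick illustration of the accessors documented above and of pre-release ordering, a hypothetical standalone example (the version strings are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	v := version.Must(version.NewVersion("1.2.3-rc.1+build.5"))

	// Segments, Prerelease, and Metadata split the version exactly as
	// the doc comments describe.
	fmt.Println(v.Segments())   // [1 2 3]
	fmt.Println(v.Prerelease()) // rc.1
	fmt.Println(v.Metadata())   // build.5

	// A pre-release sorts before the corresponding release.
	release := version.Must(version.NewVersion("1.2.3"))
	fmt.Println(v.LessThan(release)) // true
}
```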
+func (v *Version) Segments64() []int64 { + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result +} + +// String returns the full version string included pre-release +// and metadata information. +// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. +func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 0000000..cc888d4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 0000000..e474cd0 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,223 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. 
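As a rough usage sketch of the 2Q cache described above (the size and keys below are arbitrary, illustrative values):

```go
package main

import (
	"fmt"
	"log"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// A small 2Q cache: 128 total entries, split between the recent and
	// frequent lists using the default ratios.
	cache, err := lru.New2Q(128)
	if err != nil {
		log.Fatal(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)

	// The first Get of "a" finds it on the recent list and promotes it
	// to the frequent list.
	if v, ok := cache.Get("a"); ok {
		fmt.Println("a =", v)
	}
	fmt.Println("len =", cache.Len()) // 2
}
```

Note that Add places new keys on the recent list, and it is a subsequent Get that promotes a key to the frequent list.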
+type TwoQueueCache struct { + size int + recentSize int + + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. +func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. +func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. 
+// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. +func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 0000000..be2cc4d --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 0000000..33e58cf --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,25 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) + +Example +======= + +Using the LRU is very simple: + +```go +l, _ := New(128) +for i := 0; i < 256; i++ { + l.Add(i, nil) +} +if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 0000000..555225a --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. +type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // If the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
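+// The implementation below handles four cases: a hit in T1 promotes the key
+// to T2, a hit in T2 simply updates the stored value, a ghost hit in B1 or B2
+// grows or shrinks the adaptive target p before re-admitting the key into T2,
+// and a plain miss makes room if needed, trims the ghost lists, and inserts
+// the key into T1.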
+func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. 
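A minimal usage sketch of the ARC cache defined above (the import alias, keys, and values are illustrative; the import path is the one vendored here): adding a key lands it in the recent list, and a later Get promotes it to the frequent list.

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewARC sizes all four internal LRUs (T1, T2 and the B1, B2 ghost lists) to 128.
	arc, err := lru.NewARC(128)
	if err != nil {
		panic(err)
	}

	arc.Add("config", "v1") // first sighting: stored in T1 (recently used)
	if v, ok := arc.Get("config"); ok {
		fmt.Println(v) // the hit moved "config" from T1 to T2 (frequently used)
	}

	fmt.Println(arc.Contains("config"), arc.Len()) // true 1
}
```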
+func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 0000000..2547df9 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod new file mode 100644 index 0000000..824cb97 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/golang-lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 0000000..c8d9b0a --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,110 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru simplelru.LRUCache + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) (evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Add(key, value) +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Get(key) +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Contains(key) +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. 
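doc.go above describes Cache as the simplest of the three variants; the sketch below (keys and values are illustrative) wires an eviction callback through NewWithEvict and uses the evicted result of Add.

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// The callback fires for every entry pushed out of the fixed-size cache.
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	evicted := cache.Add("c", 3) // capacity is 2, so the oldest entry "a" is evicted
	fmt.Println(evicted)         // true

	_, ok := cache.Get("a")
	fmt.Println(ok) // false
}
```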
+func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Peek(key) +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } + evicted = c.lru.Add(key, value) + return false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + c.lru.Remove(key) + c.lock.Unlock() +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + c.lock.Lock() + c.lru.RemoveOldest() + c.lock.Unlock() +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Keys() +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 0000000..5673773 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,161 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. 
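simplelru carries the actual list-plus-map bookkeeping and takes no locks of its own; a minimal sketch of using it directly (single-goroutine use assumed, names illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// Unlike the lru.Cache wrapper, this LRU is not safe for concurrent use.
	l, err := simplelru.NewLRU(2, func(key, value interface{}) {
		fmt.Printf("evicted %v\n", key)
	})
	if err != nil {
		panic(err)
	}

	l.Add("x", 1)
	l.Add("y", 2)
	l.Get("x")            // touch "x" so that "y" becomes the oldest entry
	l.Add("z", 3)         // over capacity: "y" is evicted
	fmt.Println(l.Keys()) // keys from oldest to newest: [x z]
	fmt.Println(l.Len())  // 2
}
```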
+func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 0000000..74c7077 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,36 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Check if a key exsists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. 
+ Len() int + + // Clear all cache entries + Purge() +} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore new file mode 100644 index 0000000..15586a2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -0,0 +1,9 @@ +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 0000000..b7063d0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.7 + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile new file mode 100644 index 0000000..84fd743 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/Makefile @@ -0,0 +1,18 @@ +TEST?=./... + +default: test + +fmt: generate + go fmt ./... + +test: generate + go get -t ./... + go test $(TEST) $(TESTARGS) + +generate: + go generate ./... + +updatedeps: + go get -u golang.org/x/tools/cmd/stringer + +.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md new file mode 100644 index 0000000..c822332 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/README.md @@ -0,0 +1,125 @@ +# HCL + +[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) + +HCL (HashiCorp Configuration Language) is a configuration language built +by HashiCorp. 
The goal of HCL is to build a structured configuration language +that is both human and machine friendly for use with command-line tools, but +specifically targeted towards DevOps tools, servers, etc. + +HCL is also fully JSON compatible. That is, JSON can be used as completely +valid input to a system expecting HCL. This helps makes systems +interoperable with other systems. + +HCL is heavily inspired by +[libucl](https://github.com/vstakhov/libucl), +nginx configuration, and others similar. + +## Why? + +A common question when viewing HCL is to ask the question: why not +JSON, YAML, etc.? + +Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) +used a variety of configuration languages from full programming languages +such as Ruby to complete data structure languages such as JSON. What we +learned is that some people wanted human-friendly configuration languages +and some people wanted machine-friendly languages. + +JSON fits a nice balance in this, but is fairly verbose and most +importantly doesn't support comments. With YAML, we found that beginners +had a really hard time determining what the actual structure was, and +ended up guessing more often than not whether to use a hyphen, colon, etc. +in order to represent some configuration key. + +Full programming languages such as Ruby enable complex behavior +a configuration language shouldn't usually allow, and also forces +people to learn some set of Ruby. + +Because of this, we decided to create our own configuration language +that is JSON-compatible. Our configuration language (HCL) is designed +to be written and modified by humans. The API for HCL allows JSON +as an input so that it is also machine-friendly (machines can generate +JSON instead of trying to generate HCL). + +Our goal with HCL is not to alienate other configuration languages. +It is instead to provide HCL as a specialized language for our tools, +and JSON as the interoperability layer. + +## Syntax + +For a complete grammar, please see the parser itself. A high-level overview +of the syntax and grammar is listed here. + + * Single line comments start with `#` or `//` + + * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments + are not allowed. A multi-line comment (also known as a block comment) + terminates at the first `*/` found. + + * Values are assigned with the syntax `key = value` (whitespace doesn't + matter). The value can be any primitive: a string, number, boolean, + object, or list. + + * Strings are double-quoted and can contain any UTF-8 characters. + Example: `"Hello, World"` + + * Multi-line strings start with `<- + echo %Path% + + go version + + go env + + go get -t ./... + +build_script: +- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go new file mode 100644 index 0000000..c8a077d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -0,0 +1,716 @@ +package hcl + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl/hcl/token" +) + +// This is the tag to use with structures to have settings for HCL +const tagName = "hcl" + +var ( + // nodeType holds a reference to the type of ast.Node + nodeType reflect.Type = findNodeType() +) + +// Unmarshal accepts a byte slice as input and writes the +// data to the value pointed to by v. 
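Tying the syntax above to the decoder that follows, here is a minimal sketch of decoding an HCL document into a tagged struct. The `hcl:"..."` field tags follow the convention implied by tagName and decodeStruct below; the type, keys, and values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target type; fields are matched through their `hcl` tags.
type Config struct {
	Name    string `hcl:"name"`
	Port    int    `hcl:"port"`
	Verbose bool   `hcl:"verbose"`
}

func main() {
	input := `
name    = "web"
port    = 8080
verbose = true
`
	var cfg Config
	// Decode parses the HCL text and fills cfg; Unmarshal does the same for a []byte.
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:web Port:8080 Verbose:true}
}
```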
+func Unmarshal(bs []byte, v interface{}) error { + root, err := parse(bs) + if err != nil { + return err + } + + return DecodeObject(v, root) +} + +// Decode reads the given input and decodes it into the structure +// given by `out`. +func Decode(out interface{}, in string) error { + obj, err := Parse(in) + if err != nil { + return err + } + + return DecodeObject(out, obj) +} + +// DecodeObject is a lower-level version of Decode. It decodes a +// raw Object into the given output. +func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. + if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. + if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(int(v))) + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(int(v))) + return nil + } + } + + return 
&parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. + testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. 
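+		// Note that redecode stays true on every branch above, so the fresh
+		// container just stored in result is always populated by re-decoding
+		// the same node into it.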
+ if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. + done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // github.com/hashicorp/terraform/issue/5740 + if len(item.Keys) == 0 { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + + // if item is an object that was decoded from ambiguous JSON and + // flattened, make sure it's expanded if it needs to decode into a + // defined structure. + item := expandObject(item, val) + + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +// expandObject detects if an ambiguous JSON object was flattened to a List which +// should be decoded into a struct, and expands the ast to properly deocode. +func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. 
+ if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. + squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for fieldType, field := range fields { + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. 
+ if !field.CanSet() { + continue + } + + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, field) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + field.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, field) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, field); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, field); err != nil { + return err + } + } + + decodedFields = append(decodedFields, fieldType.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 0000000..575a20b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 0000000..6e5ef65 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. 
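// Illustrative usage sketch for the decoder above (assumes the package-level
// hcl.Decode entry point; the Config/Service shapes and the input are
// hypothetical). The `hcl:",key"` tag corresponds to the "key" case handled in
// decodeStruct, and repeated blocks decode into a slice via decodeSlice.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Service struct {
	Name string `hcl:",key"` // filled from the block label, e.g. "web"
	Port int    `hcl:"port"`
}

type Config struct {
	Services []Service `hcl:"service"`
}

func main() {
	input := `
service "web" {
  port = 8080
}

service "api" {
  port = 9090
}
`
	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		log.Fatal(err)
	}
	// Expected shape: two Service values, "web"/8080 and "api"/9090.
	fmt.Printf("%+v\n", cfg.Services)
}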
+type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // I'm not entirely sure what causes this, but removing this causes + // a test failure. We should investigate at some point. + if len(o.Keys) == 0 { + return token.Pos{} + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. 
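// Illustrative sketch of combining the Filter, Children and Elem helpers above
// after parsing. The input and the expected counts are hypothetical; the calls
// use only the parser and ast APIs defined in this vendored tree.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`
service "web" {
  port = 80
}

region = "us-east-1"
`)

	f, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	list, ok := f.Node.(*ast.ObjectList)
	if !ok {
		log.Fatal("root node is not an *ast.ObjectList")
	}

	// Filter("service") returns items whose first key is "service" with that
	// prefix stripped, so the nested block keeps only its "web" key.
	services := list.Filter("service")
	fmt.Println("service blocks:", len(services.Children().Items)) // 1

	// Elem() keeps direct assignments (no keys left after filtering).
	fmt.Println("region assignments:", len(list.Filter("region").Elem().Items)) // 1
}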
+type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. +type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 0000000..ba07ad4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. +func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? 
+ fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 0000000..5c99381 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 0000000..54a6493 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,507 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. 
when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")) + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. 
this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + fmt.Println("illegal") + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. 
+ comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 0000000..0735d95 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,645 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. +package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s 
*Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. 
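// Illustrative sketch of driving this scanner directly; the input is
// hypothetical, and the loop relies only on Scan, token.EOF and the String
// methods of token.Pos and token.Type provided by this vendored tree.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	src := []byte(`port = 8080 // listener`)

	s := scanner.New(src)
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// e.g. 1:1  IDENT  "port", then ASSIGN, NUMBER and COMMENT tokens.
		fmt.Printf("%s\t%s\t%q\n", tok.Pos, tok.Type, tok.Text)
	}
}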
+func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 0000000..956c899 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,244 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. 
+ for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section, except for escaped backslashes, which we handle specifically. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + // We special case escaped backslashes in interpolations, converting + // them to their unescaped equivalents. + if r == '\\' { + q, _ := utf8.DecodeRuneInString(s) + switch q { + case '\\': + continue + } + } + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case 
'\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 0000000..59c1bb7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 0000000..e37c066 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! 
+ for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 0000000..6f46085 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. 
+ if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + fmt.Println("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 0000000..dd5c72b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. 
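	// An illustrative sketch (not from the upstream source) of installing a
	// handler that collects the first error instead of printing to stderr,
	// much like json/parser.Parse does; the variable names are hypothetical:
	//
	//	var firstErr error
	//	sc := New(src)
	//	sc.Error = func(pos token.Pos, msg string) {
	//		if firstErr == nil {
	//			firstErr = fmt.Errorf("%s: %s", pos, msg)
	//		}
	//	}
	//	sc.Scan()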
+ Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
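// A minimal usage sketch, not part of the upstream source: it drives the
// scanner over a small JSON document and prints each token until EOF. The
// function name demoScan is hypothetical; New, Scan and token.EOF are the
// APIs defined in this package.
func demoScan() {
	src := []byte(`{"name": "web", "count": 3}`)
	s := New(src)
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		fmt.Printf("%s %q\n", tok.Type, tok.Text)
	}
}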
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + zero := ch == '0' + pos := s.srcPos + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + + // If we have a larger number and this is zero, error + if zero && pos != s.srcPos { + s.err("numbers cannot start with 0") + } + + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
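// A small sketch, not part of the upstream source, showing how the number
// path classifies its input: an integer literal scans as NUMBER while a
// mantissa/exponent form scans as FLOAT. The function name demoNumbers is
// hypothetical.
func demoNumbers() {
	for _, src := range []string{"3", "1.5e3"} {
		tok := New([]byte(src)).Scan()
		fmt.Printf("%s -> %s\n", src, tok.Type)
	}
}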
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. 
If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isHexadecimal returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isHexadecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 0000000..59c1bb7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
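// A brief sketch, not part of the upstream source, of how these comparisons
// behave; the function name demoPosOrdering is hypothetical. A position with
// a larger offset (or line) reports After as true and Before as false
// relative to an earlier one.
func demoPosOrdering() {
	a := Pos{Line: 1, Column: 1, Offset: 0}
	b := Pos{Line: 2, Column: 5, Offset: 14}
	fmt.Println(a.Before(b), b.After(a)) // prints: true true
}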
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 0000000..95a0c3e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. +func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 0000000..d9993c2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. 
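// A short sketch, not part of the upstream source, of the detection rule:
// if the first non-space rune is '{' the input is treated as JSON,
// otherwise as HCL. The function name demoLexMode is hypothetical and
// assumes an extra "fmt" import.
func demoLexMode() {
	fmt.Println(lexMode([]byte(`  {"a": 1}`)) == lexModeJson) // true
	fmt.Println(lexMode([]byte(`a = 1`)) == lexModeHcl)       // true
}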
+func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 0000000..1fca53c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. +func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/hashicorp/nomad/LICENSE b/vendor/github.com/hashicorp/nomad/LICENSE new file mode 100644 index 0000000..e87a115 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/nomad/acl/acl.go b/vendor/github.com/hashicorp/nomad/acl/acl.go new file mode 100644 index 0000000..f5a76ea --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/acl/acl.go @@ -0,0 +1,374 @@ +package acl + +import ( + "fmt" + "sort" + "strings" + + iradix "github.com/hashicorp/go-immutable-radix" + glob "github.com/ryanuber/go-glob" +) + +// ManagementACL is a singleton used for management tokens +var ManagementACL *ACL + +func init() { + var err error + ManagementACL, err = NewACL(true, nil) + if err != nil { + panic(fmt.Errorf("failed to setup management ACL: %v", err)) + } +} + +// capabilitySet is a type wrapper to help managing a set of capabilities +type capabilitySet map[string]struct{} + +func (c capabilitySet) Check(k string) bool { + _, ok := c[k] + return ok +} + +func (c capabilitySet) Set(k string) { + c[k] = struct{}{} +} + +func (c capabilitySet) Clear() { + for cap := range c { + delete(c, cap) + } +} + +// ACL object is used to convert a set of policies into a structure that +// can be efficiently evaluated to determine if an action is allowed. +type ACL struct { + // management tokens are allowed to do anything + management bool + + // namespaces maps a namespace to a capabilitySet + namespaces *iradix.Tree + + // wildcardNamespaces maps a glob pattern of a namespace to a capabilitySet + // We use an iradix for the purposes of ordered iteration. + wildcardNamespaces *iradix.Tree + + agent string + node string + operator string + quota string +} + +// maxPrivilege returns the policy which grants the most privilege +// This handles the case of Deny always taking maximum precedence. +func maxPrivilege(a, b string) string { + switch { + case a == PolicyDeny || b == PolicyDeny: + return PolicyDeny + case a == PolicyWrite || b == PolicyWrite: + return PolicyWrite + case a == PolicyRead || b == PolicyRead: + return PolicyRead + default: + return "" + } +} + +// NewACL compiles a set of policies into an ACL object +func NewACL(management bool, policies []*Policy) (*ACL, error) { + // Hot-path management tokens + if management { + return &ACL{management: true}, nil + } + + // Create the ACL object + acl := &ACL{} + nsTxn := iradix.New().Txn() + wnsTxn := iradix.New().Txn() + + for _, policy := range policies { + NAMESPACES: + for _, ns := range policy.Namespaces { + // Should the namespace be matched using a glob? 
+ globDefinition := strings.Contains(ns.Name, "*") + + // Check for existing capabilities + var capabilities capabilitySet + + if globDefinition { + raw, ok := wnsTxn.Get([]byte(ns.Name)) + if ok { + capabilities = raw.(capabilitySet) + } else { + capabilities = make(capabilitySet) + wnsTxn.Insert([]byte(ns.Name), capabilities) + } + } else { + raw, ok := nsTxn.Get([]byte(ns.Name)) + if ok { + capabilities = raw.(capabilitySet) + } else { + capabilities = make(capabilitySet) + nsTxn.Insert([]byte(ns.Name), capabilities) + } + } + + // Deny always takes precedence + if capabilities.Check(NamespaceCapabilityDeny) { + continue NAMESPACES + } + + // Add in all the capabilities + for _, cap := range ns.Capabilities { + if cap == NamespaceCapabilityDeny { + // Overwrite any existing capabilities + capabilities.Clear() + capabilities.Set(NamespaceCapabilityDeny) + continue NAMESPACES + } + capabilities.Set(cap) + } + } + + // Take the maximum privilege for agent, node, and operator + if policy.Agent != nil { + acl.agent = maxPrivilege(acl.agent, policy.Agent.Policy) + } + if policy.Node != nil { + acl.node = maxPrivilege(acl.node, policy.Node.Policy) + } + if policy.Operator != nil { + acl.operator = maxPrivilege(acl.operator, policy.Operator.Policy) + } + if policy.Quota != nil { + acl.quota = maxPrivilege(acl.quota, policy.Quota.Policy) + } + } + + // Finalize the namespaces + acl.namespaces = nsTxn.Commit() + acl.wildcardNamespaces = wnsTxn.Commit() + return acl, nil +} + +// AllowNsOp is shorthand for AllowNamespaceOperation +func (a *ACL) AllowNsOp(ns string, op string) bool { + return a.AllowNamespaceOperation(ns, op) +} + +// AllowNamespaceOperation checks if a given operation is allowed for a namespace +func (a *ACL) AllowNamespaceOperation(ns string, op string) bool { + // Hot path management tokens + if a.management { + return true + } + + // Check for a matching capability set + capabilities, ok := a.matchingCapabilitySet(ns) + if !ok { + return false + } + + // Check if the capability has been granted + return capabilities.Check(op) +} + +// AllowNamespace checks if any operations are allowed for a namespace +func (a *ACL) AllowNamespace(ns string) bool { + // Hot path management tokens + if a.management { + return true + } + + // Check for a matching capability set + capabilities, ok := a.matchingCapabilitySet(ns) + if !ok { + return false + } + + // Check if the capability has been granted + if len(capabilities) == 0 { + return false + } + + return !capabilities.Check(PolicyDeny) +} + +// matchingCapabilitySet looks for a capabilitySet that matches the namespace, +// if no concrete definitions are found, then we return the closest matching +// glob. +// The closest matching glob is the one that has the smallest character +// difference between the namespace and the glob. +func (a *ACL) matchingCapabilitySet(ns string) (capabilitySet, bool) { + // Check for a concrete matching capability set + raw, ok := a.namespaces.Get([]byte(ns)) + if ok { + return raw.(capabilitySet), true + } + + // We didn't find a concrete match, so lets try and evaluate globs. + return a.findClosestMatchingGlob(ns) +} + +type matchingGlob struct { + ns string + difference int + capabilitySet capabilitySet +} + +func (a *ACL) findClosestMatchingGlob(ns string) (capabilitySet, bool) { + // First, find all globs that match. + matchingGlobs := a.findAllMatchingWildcards(ns) + + // If none match, let's return. 
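	// Worked example (not from the upstream source, values are hypothetical):
	// the "difference" computed by findAllMatchingWildcards below is
	// len(namespace) - len(glob) + count of '*' in the glob, so for the
	// namespace "prod-api" the glob "prod-*" scores 8-6+1 = 3 while a bare
	// "*" scores 8-1+1 = 8, and the stable sort further down therefore
	// prefers the more specific "prod-*" capability set.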
+ if len(matchingGlobs) == 0 { + return capabilitySet{}, false + } + + // If a single matches, lets be efficient and return early. + if len(matchingGlobs) == 1 { + return matchingGlobs[0].capabilitySet, true + } + + // Stable sort the matched globs, based on the character difference between + // the glob definition and the requested namespace. This allows us to be + // more consistent about results based on the policy definition. + sort.SliceStable(matchingGlobs, func(i, j int) bool { + return matchingGlobs[i].difference <= matchingGlobs[j].difference + }) + + return matchingGlobs[0].capabilitySet, true +} + +func (a *ACL) findAllMatchingWildcards(ns string) []matchingGlob { + var matches []matchingGlob + + nsLen := len(ns) + + a.wildcardNamespaces.Root().Walk(func(bk []byte, iv interface{}) bool { + k := string(bk) + v := iv.(capabilitySet) + + isMatch := glob.Glob(k, ns) + if isMatch { + pair := matchingGlob{ + ns: k, + difference: nsLen - len(k) + strings.Count(k, glob.GLOB), + capabilitySet: v, + } + matches = append(matches, pair) + } + + // We always want to walk the entire tree, never terminate early. + return false + }) + + return matches +} + +// AllowAgentRead checks if read operations are allowed for an agent +func (a *ACL) AllowAgentRead() bool { + switch { + case a.management: + return true + case a.agent == PolicyWrite: + return true + case a.agent == PolicyRead: + return true + default: + return false + } +} + +// AllowAgentWrite checks if write operations are allowed for an agent +func (a *ACL) AllowAgentWrite() bool { + switch { + case a.management: + return true + case a.agent == PolicyWrite: + return true + default: + return false + } +} + +// AllowNodeRead checks if read operations are allowed for a node +func (a *ACL) AllowNodeRead() bool { + switch { + case a.management: + return true + case a.node == PolicyWrite: + return true + case a.node == PolicyRead: + return true + default: + return false + } +} + +// AllowNodeWrite checks if write operations are allowed for a node +func (a *ACL) AllowNodeWrite() bool { + switch { + case a.management: + return true + case a.node == PolicyWrite: + return true + default: + return false + } +} + +// AllowOperatorRead checks if read operations are allowed for a operator +func (a *ACL) AllowOperatorRead() bool { + switch { + case a.management: + return true + case a.operator == PolicyWrite: + return true + case a.operator == PolicyRead: + return true + default: + return false + } +} + +// AllowOperatorWrite checks if write operations are allowed for a operator +func (a *ACL) AllowOperatorWrite() bool { + switch { + case a.management: + return true + case a.operator == PolicyWrite: + return true + default: + return false + } +} + +// AllowQuotaRead checks if read operations are allowed for all quotas +func (a *ACL) AllowQuotaRead() bool { + switch { + case a.management: + return true + case a.quota == PolicyWrite: + return true + case a.quota == PolicyRead: + return true + default: + return false + } +} + +// AllowQuotaWrite checks if write operations are allowed for quotas +func (a *ACL) AllowQuotaWrite() bool { + switch { + case a.management: + return true + case a.quota == PolicyWrite: + return true + default: + return false + } +} + +// IsManagement checks if this represents a management token +func (a *ACL) IsManagement() bool { + return a.management +} diff --git a/vendor/github.com/hashicorp/nomad/acl/policy.go b/vendor/github.com/hashicorp/nomad/acl/policy.go new file mode 100644 index 0000000..db45f11 --- /dev/null +++ 
b/vendor/github.com/hashicorp/nomad/acl/policy.go @@ -0,0 +1,191 @@ +package acl + +import ( + "fmt" + "regexp" + + "github.com/hashicorp/hcl" +) + +const ( + // The following levels are the only valid values for the `policy = "read"` stanza. + // When policies are merged together, the most privilege is granted, except for deny + // which always takes precedence and supercedes. + PolicyDeny = "deny" + PolicyRead = "read" + PolicyWrite = "write" +) + +const ( + // The following are the fine-grained capabilities that can be granted within a namespace. + // The Policy stanza is a short hand for granting several of these. When capabilities are + // combined we take the union of all capabilities. If the deny capability is present, it + // takes precedence and overwrites all other capabilities. + NamespaceCapabilityDeny = "deny" + NamespaceCapabilityListJobs = "list-jobs" + NamespaceCapabilityReadJob = "read-job" + NamespaceCapabilitySubmitJob = "submit-job" + NamespaceCapabilityDispatchJob = "dispatch-job" + NamespaceCapabilityReadLogs = "read-logs" + NamespaceCapabilityReadFS = "read-fs" + NamespaceCapabilitySentinelOverride = "sentinel-override" +) + +var ( + validNamespace = regexp.MustCompile("^[a-zA-Z0-9-*]{1,128}$") +) + +// Policy represents a parsed HCL or JSON policy. +type Policy struct { + Namespaces []*NamespacePolicy `hcl:"namespace,expand"` + Agent *AgentPolicy `hcl:"agent"` + Node *NodePolicy `hcl:"node"` + Operator *OperatorPolicy `hcl:"operator"` + Quota *QuotaPolicy `hcl:"quota"` + Raw string `hcl:"-"` +} + +// IsEmpty checks to make sure that at least one policy has been set and is not +// comprised of only a raw policy. +func (p *Policy) IsEmpty() bool { + return len(p.Namespaces) == 0 && + p.Agent == nil && + p.Node == nil && + p.Operator == nil && + p.Quota == nil +} + +// NamespacePolicy is the policy for a specific namespace +type NamespacePolicy struct { + Name string `hcl:",key"` + Policy string + Capabilities []string +} + +type AgentPolicy struct { + Policy string +} + +type NodePolicy struct { + Policy string +} + +type OperatorPolicy struct { + Policy string +} + +type QuotaPolicy struct { + Policy string +} + +// isPolicyValid makes sure the given string matches one of the valid policies. 
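// An end-to-end sketch, not part of the upstream source: it parses an HCL
// policy, compiles it with NewACL, and checks namespace capabilities. The
// "read" policy expands to list-jobs and read-job only, so submit-job is
// refused. The function name demoPolicy is hypothetical.
func demoPolicy() error {
	p, err := Parse(`namespace "default" { policy = "read" }`)
	if err != nil {
		return err
	}
	a, err := NewACL(false, []*Policy{p})
	if err != nil {
		return err
	}
	fmt.Println(a.AllowNamespaceOperation("default", NamespaceCapabilityReadJob))   // true
	fmt.Println(a.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob)) // false
	return nil
}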
+func isPolicyValid(policy string) bool { + switch policy { + case PolicyDeny, PolicyRead, PolicyWrite: + return true + default: + return false + } +} + +// isNamespaceCapabilityValid ensures the given capability is valid for a namespace policy +func isNamespaceCapabilityValid(cap string) bool { + switch cap { + case NamespaceCapabilityDeny, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, + NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs, + NamespaceCapabilityReadFS: + return true + // Separate the enterprise-only capabilities + case NamespaceCapabilitySentinelOverride: + return true + default: + return false + } +} + +// expandNamespacePolicy provides the equivalent set of capabilities for +// a namespace policy +func expandNamespacePolicy(policy string) []string { + switch policy { + case PolicyDeny: + return []string{NamespaceCapabilityDeny} + case PolicyRead: + return []string{ + NamespaceCapabilityListJobs, + NamespaceCapabilityReadJob, + } + case PolicyWrite: + return []string{ + NamespaceCapabilityListJobs, + NamespaceCapabilityReadJob, + NamespaceCapabilitySubmitJob, + NamespaceCapabilityDispatchJob, + NamespaceCapabilityReadLogs, + NamespaceCapabilityReadFS, + } + default: + return nil + } +} + +// Parse is used to parse the specified ACL rules into an +// intermediary set of policies, before being compiled into +// the ACL +func Parse(rules string) (*Policy, error) { + // Decode the rules + p := &Policy{Raw: rules} + if rules == "" { + // Hot path for empty rules + return p, nil + } + + // Attempt to parse + if err := hcl.Decode(p, rules); err != nil { + return nil, fmt.Errorf("Failed to parse ACL Policy: %v", err) + } + + // At least one valid policy must be specified, we don't want to store only + // raw data + if p.IsEmpty() { + return nil, fmt.Errorf("Invalid policy: %s", p.Raw) + } + + // Validate the policy + for _, ns := range p.Namespaces { + if !validNamespace.MatchString(ns.Name) { + return nil, fmt.Errorf("Invalid namespace name: %#v", ns) + } + if ns.Policy != "" && !isPolicyValid(ns.Policy) { + return nil, fmt.Errorf("Invalid namespace policy: %#v", ns) + } + for _, cap := range ns.Capabilities { + if !isNamespaceCapabilityValid(cap) { + return nil, fmt.Errorf("Invalid namespace capability '%s': %#v", cap, ns) + } + } + + // Expand the short hand policy to the capabilities and + // add to any existing capabilities + if ns.Policy != "" { + extraCap := expandNamespacePolicy(ns.Policy) + ns.Capabilities = append(ns.Capabilities, extraCap...) 
+ } + } + + if p.Agent != nil && !isPolicyValid(p.Agent.Policy) { + return nil, fmt.Errorf("Invalid agent policy: %#v", p.Agent) + } + + if p.Node != nil && !isPolicyValid(p.Node.Policy) { + return nil, fmt.Errorf("Invalid node policy: %#v", p.Node) + } + + if p.Operator != nil && !isPolicyValid(p.Operator.Policy) { + return nil, fmt.Errorf("Invalid operator policy: %#v", p.Operator) + } + + if p.Quota != nil && !isPolicyValid(p.Quota.Policy) { + return nil, fmt.Errorf("Invalid quota policy: %#v", p.Quota) + } + return p, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go new file mode 100644 index 0000000..8c3b2ce --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go @@ -0,0 +1,644 @@ +package allocdir + +import ( + "archive/tar" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + hclog "github.com/hashicorp/go-hclog" + multierror "github.com/hashicorp/go-multierror" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hpcloud/tail/watch" + tomb "gopkg.in/tomb.v1" +) + +const ( + // idUnsupported is what the uid/gid will be set to on platforms (eg + // Windows) that don't support integer ownership identifiers. + idUnsupported = -1 +) + +var ( + // SnapshotErrorTime is the sentinel time that will be used on the + // error file written by Snapshot when it encounters as error. + SnapshotErrorTime = time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC) + + // The name of the directory that is shared across tasks in a task group. + SharedAllocName = "alloc" + + // Name of the directory where logs of Tasks are written + LogDirName = "logs" + + // SharedDataDir is one of the shared allocation directories. It is + // included in snapshots. + SharedDataDir = "data" + + // TmpDirName is the name of the temporary directory in each alloc and + // task. + TmpDirName = "tmp" + + // The set of directories that exist inside each shared alloc directory. + SharedAllocDirs = []string{LogDirName, TmpDirName, SharedDataDir} + + // The name of the directory that exists inside each task directory + // regardless of driver. + TaskLocal = "local" + + // TaskSecrets is the name of the secret directory inside each task + // directory + TaskSecrets = "secrets" + + // TaskDirs is the set of directories created in each tasks directory. + TaskDirs = map[string]os.FileMode{TmpDirName: os.ModeSticky | 0777} +) + +// AllocDir allows creating, destroying, and accessing an allocation's +// directory. All methods are safe for concurrent use. +type AllocDir struct { + // AllocDir is the directory used for storing any state + // of this allocation. It will be purged on alloc destroy. + AllocDir string + + // The shared directory is available to all tasks within the same task + // group. + SharedDir string + + // TaskDirs is a mapping of task names to their non-shared directory. 
+ TaskDirs map[string]*TaskDir + + // built is true if Build has successfully run + built bool + + mu sync.RWMutex + + logger hclog.Logger +} + +// AllocDirFS exposes file operations on the alloc dir +type AllocDirFS interface { + List(path string) ([]*cstructs.AllocFileInfo, error) + Stat(path string) (*cstructs.AllocFileInfo, error) + ReadAt(path string, offset int64) (io.ReadCloser, error) + Snapshot(w io.Writer) error + BlockUntilExists(ctx context.Context, path string) (chan error, error) + ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) +} + +// NewAllocDir initializes the AllocDir struct with allocDir as base path for +// the allocation directory. +func NewAllocDir(logger hclog.Logger, allocDir string) *AllocDir { + logger = logger.Named("alloc_dir") + return &AllocDir{ + AllocDir: allocDir, + SharedDir: filepath.Join(allocDir, SharedAllocName), + TaskDirs: make(map[string]*TaskDir), + logger: logger, + } +} + +// Copy an AllocDir and all of its TaskDirs. Returns nil if AllocDir is +// nil. +func (d *AllocDir) Copy() *AllocDir { + d.mu.RLock() + defer d.mu.RUnlock() + + if d == nil { + return nil + } + dcopy := &AllocDir{ + AllocDir: d.AllocDir, + SharedDir: d.SharedDir, + TaskDirs: make(map[string]*TaskDir, len(d.TaskDirs)), + logger: d.logger, + } + for k, v := range d.TaskDirs { + dcopy.TaskDirs[k] = v.Copy() + } + return dcopy +} + +// NewTaskDir creates a new TaskDir and adds it to the AllocDirs TaskDirs map. +func (d *AllocDir) NewTaskDir(name string) *TaskDir { + d.mu.Lock() + defer d.mu.Unlock() + + td := newTaskDir(d.logger, d.AllocDir, name) + d.TaskDirs[name] = td + return td +} + +// Snapshot creates an archive of the files and directories in the data dir of +// the allocation and the task local directories +// +// Since a valid tar may have been written even when an error occurs, a special +// file "NOMAD-${ALLOC_ID}-ERROR.log" will be appended to the tar with the +// error message as the contents. 
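// An illustrative sketch, not part of the upstream source; the paths are
// hypothetical. Build must succeed before the directory is used, and
// Snapshot then streams a tar of the shared data dir plus every task's
// local dir into the given writer.
func demoSnapshot() error {
	d := NewAllocDir(hclog.Default(), "/tmp/demo-alloc")
	if err := d.Build(); err != nil {
		return err
	}
	defer d.Destroy()

	f, err := os.Create("/tmp/demo-alloc.tar")
	if err != nil {
		return err
	}
	defer f.Close()

	return d.Snapshot(f)
}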
+func (d *AllocDir) Snapshot(w io.Writer) error { + d.mu.RLock() + defer d.mu.RUnlock() + + allocDataDir := filepath.Join(d.SharedDir, SharedDataDir) + rootPaths := []string{allocDataDir} + for _, taskdir := range d.TaskDirs { + rootPaths = append(rootPaths, taskdir.LocalDir) + } + + tw := tar.NewWriter(w) + defer tw.Close() + + walkFn := func(path string, fileInfo os.FileInfo, err error) error { + if err != nil { + return err + } + + // Include the path of the file name relative to the alloc dir + // so that we can put the files in the right directories + relPath, err := filepath.Rel(d.AllocDir, path) + if err != nil { + return err + } + link := "" + if fileInfo.Mode()&os.ModeSymlink != 0 { + target, err := os.Readlink(path) + if err != nil { + return fmt.Errorf("error reading symlink: %v", err) + } + link = target + } + hdr, err := tar.FileInfoHeader(fileInfo, link) + if err != nil { + return fmt.Errorf("error creating file header: %v", err) + } + hdr.Name = relPath + if err := tw.WriteHeader(hdr); err != nil { + return err + } + + // If it's a directory or symlink we just write the header into the tar + if fileInfo.IsDir() || (fileInfo.Mode()&os.ModeSymlink != 0) { + return nil + } + + // Write the file into the archive + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + if _, err := io.Copy(tw, file); err != nil { + return err + } + return nil + } + + // Walk through all the top level directories and add the files and + // directories in the archive + for _, path := range rootPaths { + if err := filepath.Walk(path, walkFn); err != nil { + allocID := filepath.Base(d.AllocDir) + if writeErr := writeError(tw, allocID, err); writeErr != nil { + // This could be bad; other side won't know + // snapshotting failed. It could also just mean + // the snapshotting side closed the connect + // prematurely and won't try to use the tar + // anyway. + d.logger.Warn("snapshotting failed and unable to write error marker", "error", writeErr) + } + return fmt.Errorf("failed to snapshot %s: %v", path, err) + } + } + + return nil +} + +// Move other alloc directory's shared path and local dir to this alloc dir. 
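// A brief sketch, not part of the upstream source; names are hypothetical.
// Move requires that the destination has already been built, so the usual
// sequence is to Build the new AllocDir and then Move the previous
// allocation's shared data and task local dirs into it.
func demoMove(prev *AllocDir, tasks []*structs.Task, logger hclog.Logger) error {
	next := NewAllocDir(logger, "/tmp/demo-alloc-new")
	if err := next.Build(); err != nil {
		return err
	}
	return next.Move(prev, tasks)
}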
+func (d *AllocDir) Move(other *AllocDir, tasks []*structs.Task) error { + d.mu.RLock() + if !d.built { + // Enforce the invariant that Build is called before Move + d.mu.RUnlock() + return fmt.Errorf("unable to move to %q - alloc dir is not built", d.AllocDir) + } + + // Moving is slow and only reads immutable fields, so unlock during heavy IO + d.mu.RUnlock() + + // Move the data directory + otherDataDir := filepath.Join(other.SharedDir, SharedDataDir) + dataDir := filepath.Join(d.SharedDir, SharedDataDir) + if fileInfo, err := os.Stat(otherDataDir); fileInfo != nil && err == nil { + os.Remove(dataDir) // remove an empty data dir if it exists + if err := os.Rename(otherDataDir, dataDir); err != nil { + return fmt.Errorf("error moving data dir: %v", err) + } + } + + // Move the task directories + for _, task := range tasks { + otherTaskDir := filepath.Join(other.AllocDir, task.Name) + otherTaskLocal := filepath.Join(otherTaskDir, TaskLocal) + + fileInfo, err := os.Stat(otherTaskLocal) + if fileInfo != nil && err == nil { + // TaskDirs haven't been built yet, so create it + newTaskDir := filepath.Join(d.AllocDir, task.Name) + if err := os.MkdirAll(newTaskDir, 0777); err != nil { + return fmt.Errorf("error creating task %q dir: %v", task.Name, err) + } + localDir := filepath.Join(newTaskDir, TaskLocal) + os.Remove(localDir) // remove an empty local dir if it exists + if err := os.Rename(otherTaskLocal, localDir); err != nil { + return fmt.Errorf("error moving task %q local dir: %v", task.Name, err) + } + } + } + + return nil +} + +// Tears down previously build directory structure. +func (d *AllocDir) Destroy() error { + // Unmount all mounted shared alloc dirs. + var mErr multierror.Error + if err := d.UnmountAll(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + if err := os.RemoveAll(d.AllocDir); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("failed to remove alloc dir %q: %v", d.AllocDir, err)) + } + + // Unset built since the alloc dir has been destroyed. + d.mu.Lock() + d.built = false + d.mu.Unlock() + return mErr.ErrorOrNil() +} + +// UnmountAll linked/mounted directories in task dirs. +func (d *AllocDir) UnmountAll() error { + d.mu.RLock() + defer d.mu.RUnlock() + + var mErr multierror.Error + for _, dir := range d.TaskDirs { + // Check if the directory has the shared alloc mounted. + if pathExists(dir.SharedTaskDir) { + if err := unlinkDir(dir.SharedTaskDir); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to unmount shared alloc dir %q: %v", dir.SharedTaskDir, err)) + } else if err := os.RemoveAll(dir.SharedTaskDir); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to delete shared alloc dir %q: %v", dir.SharedTaskDir, err)) + } + } + + if pathExists(dir.SecretsDir) { + if err := removeSecretDir(dir.SecretsDir); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to remove the secret dir %q: %v", dir.SecretsDir, err)) + } + } + + // Unmount dev/ and proc/ have been mounted. + if err := dir.unmountSpecialDirs(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + return mErr.ErrorOrNil() +} + +// Build the directory tree for an allocation. +func (d *AllocDir) Build() error { + // Make the alloc directory, owned by the nomad process. + if err := os.MkdirAll(d.AllocDir, 0755); err != nil { + return fmt.Errorf("Failed to make the alloc directory %v: %v", d.AllocDir, err) + } + + // Make the shared directory and make it available to all user/groups. 
+ if err := os.MkdirAll(d.SharedDir, 0777); err != nil { + return err + } + + // Make the shared directory have non-root permissions. + if err := dropDirPermissions(d.SharedDir, os.ModePerm); err != nil { + return err + } + + // Create shared subdirs + for _, dir := range SharedAllocDirs { + p := filepath.Join(d.SharedDir, dir) + if err := os.MkdirAll(p, 0777); err != nil { + return err + } + if err := dropDirPermissions(p, os.ModePerm); err != nil { + return err + } + } + + // Mark as built + d.mu.Lock() + d.built = true + d.mu.Unlock() + return nil +} + +// List returns the list of files at a path relative to the alloc dir +func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) + } else if escapes { + return nil, fmt.Errorf("Path escapes the alloc directory") + } + + p := filepath.Join(d.AllocDir, path) + finfos, err := ioutil.ReadDir(p) + if err != nil { + return []*cstructs.AllocFileInfo{}, err + } + files := make([]*cstructs.AllocFileInfo, len(finfos)) + for idx, info := range finfos { + files[idx] = &cstructs.AllocFileInfo{ + Name: info.Name(), + IsDir: info.IsDir(), + Size: info.Size(), + FileMode: info.Mode().String(), + ModTime: info.ModTime(), + } + } + return files, err +} + +// Stat returns information about the file at a path relative to the alloc dir +func (d *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) + } else if escapes { + return nil, fmt.Errorf("Path escapes the alloc directory") + } + + p := filepath.Join(d.AllocDir, path) + info, err := os.Stat(p) + if err != nil { + return nil, err + } + + return &cstructs.AllocFileInfo{ + Size: info.Size(), + Name: info.Name(), + IsDir: info.IsDir(), + FileMode: info.Mode().String(), + ModTime: info.ModTime(), + }, nil +} + +// ReadAt returns a reader for a file at the path relative to the alloc dir +func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) + } else if escapes { + return nil, fmt.Errorf("Path escapes the alloc directory") + } + + p := filepath.Join(d.AllocDir, path) + + // Check if it is trying to read into a secret directory + d.mu.RLock() + for _, dir := range d.TaskDirs { + if filepath.HasPrefix(p, dir.SecretsDir) { + d.mu.RUnlock() + return nil, fmt.Errorf("Reading secret file prohibited: %s", path) + } + } + d.mu.RUnlock() + + f, err := os.Open(p) + if err != nil { + return nil, err + } + if _, err := f.Seek(offset, 0); err != nil { + return nil, fmt.Errorf("can't seek to offset %q: %v", offset, err) + } + return f, nil +} + +// BlockUntilExists blocks until the passed file relative the allocation +// directory exists. The block can be cancelled with the passed context. 
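A caller typically bounds the wait with a context and then receives once from the returned channel before it starts reading the file. The sketch below shows that pattern, assuming the vendored allocdir package is importable; the logger, alloc dir path, and file name are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/client/allocdir"
)

func main() {
	// Placeholder alloc dir; in the client this comes from the allocation.
	ad := allocdir.NewAllocDir(hclog.Default(), "/var/nomad/alloc/b1b5c2a4")

	// Bound the wait so a missing file does not block forever.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	waitCh, err := ad.BlockUntilExists(ctx, "alloc/logs/web.stdout.0")
	if err != nil {
		log.Fatal(err) // e.g. the path escapes the alloc dir
	}
	if err := <-waitCh; err != nil {
		log.Fatal(err) // cancelled or the watcher failed
	}
	fmt.Println("file exists; safe to start tailing with ChangeEvents")
}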
+func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) + } else if escapes { + return nil, fmt.Errorf("Path escapes the alloc directory") + } + + // Get the path relative to the alloc directory + p := filepath.Join(d.AllocDir, path) + watcher := getFileWatcher(p) + returnCh := make(chan error, 1) + t := &tomb.Tomb{} + go func() { + <-ctx.Done() + t.Kill(nil) + }() + go func() { + returnCh <- watcher.BlockUntilExists(t) + close(returnCh) + }() + return returnCh, nil +} + +// ChangeEvents watches for changes to the passed path relative to the +// allocation directory. The offset should be the last read offset. The context is +// used to clean up the watch. +func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) { + if escapes, err := structs.PathEscapesAllocDir("", path); err != nil { + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) + } else if escapes { + return nil, fmt.Errorf("Path escapes the alloc directory") + } + + t := &tomb.Tomb{} + go func() { + <-ctx.Done() + t.Kill(nil) + }() + + // Get the path relative to the alloc directory + p := filepath.Join(d.AllocDir, path) + watcher := getFileWatcher(p) + return watcher.ChangeEvents(t, curOffset) +} + +// getFileWatcher returns a FileWatcher for the given path. +func getFileWatcher(path string) watch.FileWatcher { + return watch.NewPollingFileWatcher(path) +} + +// fileCopy from src to dst setting the permissions and owner (if uid & gid are +// both greater than 0) +func fileCopy(src, dst string, uid, gid int, perm os.FileMode) error { + // Do a simple copy. + srcFile, err := os.Open(src) + if err != nil { + return fmt.Errorf("Couldn't open src file %v: %v", src, err) + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, perm) + if err != nil { + return fmt.Errorf("Couldn't create destination file %v: %v", dst, err) + } + defer dstFile.Close() + + if _, err := io.Copy(dstFile, srcFile); err != nil { + return fmt.Errorf("Couldn't copy %q to %q: %v", src, dst, err) + } + + if uid != idUnsupported && gid != idUnsupported { + if err := dstFile.Chown(uid, gid); err != nil { + return fmt.Errorf("Couldn't copy %q to %q: %v", src, dst, err) + } + } + + return nil +} + +// pathExists is a helper function to check if the path exists. +func pathExists(path string) bool { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return false + } + } + return true +} + +// pathEmpty returns true if a path exists, is listable, and is empty. If the +// path does not exist or is not listable an error is returned. +func pathEmpty(path string) (bool, error) { + f, err := os.Open(path) + if err != nil { + return false, err + } + defer f.Close() + entries, err := f.Readdir(1) + if err != nil && err != io.EOF { + return false, err + } + return len(entries) == 0, nil +} + +// createDir creates a directory structure inside the basepath. This functions +// preserves the permissions of each of the subdirectories in the relative path +// by looking up the permissions in the host. +func createDir(basePath, relPath string) error { + filePerms, err := splitPath(relPath) + if err != nil { + return err + } + + // We are going backwards since we create the root of the directory first + // and then create the entire nested structure. 
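+ // For example, splitPath("/usr/lib/ssl") returns entries leaf-first
+ // ("/usr/lib/ssl", "/usr/lib", "/usr", "/"), so walking the slice from the
+ // end recreates the tree under basePath from the root down, using the
+ // host's permissions wherever the path could be stat'd.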
+ for i := len(filePerms) - 1; i >= 0; i-- { + fi := filePerms[i] + destDir := filepath.Join(basePath, fi.Name) + if err := os.MkdirAll(destDir, fi.Perm); err != nil { + return err + } + + if fi.Uid != idUnsupported && fi.Gid != idUnsupported { + if err := os.Chown(destDir, fi.Uid, fi.Gid); err != nil { + return err + } + } + } + return nil +} + +// fileInfo holds the path and the permissions of a file +type fileInfo struct { + Name string + Perm os.FileMode + + // Uid and Gid are unsupported on Windows + Uid int + Gid int +} + +// splitPath stats each subdirectory of a path. The first element of the array +// is the file passed to this function, and the last element is the root of the +// path. +func splitPath(path string) ([]fileInfo, error) { + var mode os.FileMode + fi, err := os.Stat(path) + + // If the path is not present in the host then we respond with the most + // flexible permission. + uid, gid := idUnsupported, idUnsupported + if err != nil { + mode = os.ModePerm + } else { + uid, gid = getOwner(fi) + mode = fi.Mode() + } + var dirs []fileInfo + dirs = append(dirs, fileInfo{Name: path, Perm: mode, Uid: uid, Gid: gid}) + currentDir := path + for { + dir := filepath.Dir(filepath.Clean(currentDir)) + if dir == currentDir { + break + } + + // We try to find the permission of the file in the host. If the path is not + // present in the host then we respond with the most flexible permission. + uid, gid := idUnsupported, idUnsupported + fi, err := os.Stat(dir) + if err != nil { + mode = os.ModePerm + } else { + uid, gid = getOwner(fi) + mode = fi.Mode() + } + dirs = append(dirs, fileInfo{Name: dir, Perm: mode, Uid: uid, Gid: gid}) + currentDir = dir + } + return dirs, nil +} + +// SnapshotErrorFilename returns the filename which will exist if there was an +// error snapshotting a tar. +func SnapshotErrorFilename(allocID string) string { + return fmt.Sprintf("NOMAD-%s-ERROR.log", allocID) +} + +// writeError writes a special file to a tar archive with the error encountered +// during snapshotting. See Snapshot(). +func writeError(tw *tar.Writer, allocID string, err error) error { + contents := []byte(fmt.Sprintf("Error snapshotting: %v", err)) + hdr := tar.Header{ + Name: SnapshotErrorFilename(allocID), + Mode: 0666, + Size: int64(len(contents)), + AccessTime: SnapshotErrorTime, + ChangeTime: SnapshotErrorTime, + ModTime: SnapshotErrorTime, + Typeflag: tar.TypeReg, + } + + if err := tw.WriteHeader(&hdr); err != nil { + return err + } + + _, err = tw.Write(contents) + return err +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/fs_darwin.go b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_darwin.go new file mode 100644 index 0000000..fe29791 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_darwin.go @@ -0,0 +1,26 @@ +package allocdir + +import ( + "os" + "syscall" +) + +// linkDir hardlinks src to dst. The src and dst must be on the same filesystem. +func linkDir(src, dst string) error { + return syscall.Link(src, dst) +} + +// unlinkDir removes a directory link. 
+func unlinkDir(dir string) error { + return syscall.Unlink(dir) +} + +// createSecretDir creates the secrets dir folder at the given path +func createSecretDir(dir string) error { + return os.MkdirAll(dir, 0777) +} + +// removeSecretDir removes the secrets dir folder +func removeSecretDir(dir string) error { + return os.RemoveAll(dir) +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/fs_freebsd.go b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_freebsd.go new file mode 100644 index 0000000..fe29791 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_freebsd.go @@ -0,0 +1,26 @@ +package allocdir + +import ( + "os" + "syscall" +) + +// linkDir hardlinks src to dst. The src and dst must be on the same filesystem. +func linkDir(src, dst string) error { + return syscall.Link(src, dst) +} + +// unlinkDir removes a directory link. +func unlinkDir(dir string) error { + return syscall.Unlink(dir) +} + +// createSecretDir creates the secrets dir folder at the given path +func createSecretDir(dir string) error { + return os.MkdirAll(dir, 0777) +} + +// removeSecretDir removes the secrets dir folder +func removeSecretDir(dir string) error { + return os.RemoveAll(dir) +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/fs_linux.go b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_linux.go new file mode 100644 index 0000000..7a76c5d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_linux.go @@ -0,0 +1,90 @@ +package allocdir + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // secretDirTmpfsSize is the size of the tmpfs per task in MBs + secretDirTmpfsSize = 1 + + // secretMarker is the filename of the marker created so Nomad doesn't + // try to mount the secrets tmpfs more than once + secretMarker = ".nomad-mount" +) + +// linkDir bind mounts src to dst as Linux doesn't support hardlinking +// directories. +func linkDir(src, dst string) error { + if err := os.MkdirAll(dst, 0777); err != nil { + return err + } + + return syscall.Mount(src, dst, "", syscall.MS_BIND, "") +} + +// unlinkDir unmounts a bind mounted directory as Linux doesn't support +// hardlinking directories. If the dir is already unmounted no error is +// returned. 
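+// (umount(2) reports EINVAL when the target is not a mount point, which is
+// why that error is treated as "already unmounted" below.)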
+func unlinkDir(dir string) error { + if err := syscall.Unmount(dir, 0); err != nil { + if err != syscall.EINVAL { + return err + } + } + return nil +} + +// createSecretDir creates the secrets dir folder at the given path using a +// tmpfs +func createSecretDir(dir string) error { + // Only mount the tmpfs if we are root + if unix.Geteuid() == 0 { + if err := os.MkdirAll(dir, 0777); err != nil { + return err + } + + // Check for marker file and skip mounting if it exists + marker := filepath.Join(dir, secretMarker) + if _, err := os.Stat(marker); err == nil { + return nil + } + + var flags uintptr + flags = syscall.MS_NOEXEC + options := fmt.Sprintf("size=%dm", secretDirTmpfsSize) + if err := syscall.Mount("tmpfs", dir, "tmpfs", flags, options); err != nil { + return os.NewSyscallError("mount", err) + } + + // Create the marker file so we don't try to mount more than once + f, err := os.OpenFile(marker, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + // Hard fail since if this fails something is really wrong + return err + } + f.Close() + return nil + } + + return os.MkdirAll(dir, 0777) +} + +// createSecretDir removes the secrets dir folder +func removeSecretDir(dir string) error { + if unix.Geteuid() == 0 { + if err := unlinkDir(dir); err != nil { + // Ignore invalid path errors + if err != syscall.ENOENT { + return os.NewSyscallError("unmount", err) + } + } + + } + return os.RemoveAll(dir) +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/fs_solaris.go b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_solaris.go new file mode 100644 index 0000000..088b58f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_solaris.go @@ -0,0 +1,27 @@ +package allocdir + +import ( + "os" + "syscall" +) + +// linkDir hardlinks src to dst. The src and dst must be on the same filesystem. +func linkDir(src, dst string) error { + return syscall.Link(src, dst) +} + +// unlinkDir removes a directory link. +func unlinkDir(dir string) error { + return syscall.Unlink(dir) +} + +// createSecretDir creates the secrets dir folder at the given path +func createSecretDir(dir string) error { + // TODO solaris has support for tmpfs so use that + return os.MkdirAll(dir, 0777) +} + +// removeSecretDir removes the secrets dir folder +func removeSecretDir(dir string) error { + return os.RemoveAll(dir) +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/fs_unix.go b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_unix.go new file mode 100644 index 0000000..77447a3 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_unix.go @@ -0,0 +1,107 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package allocdir + +import ( + "fmt" + "os" + "os/user" + "path/filepath" + "strconv" + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + // SharedAllocContainerPath is the path inside container for mounted + // directory shared across tasks in a task group. + SharedAllocContainerPath = filepath.Join("/", SharedAllocName) + + // TaskLocalContainer is the path inside a container for mounted directory + // for local storage. + TaskLocalContainerPath = filepath.Join("/", TaskLocal) + + // TaskSecretsContainerPath is the path inside a container for mounted + // secrets directory + TaskSecretsContainerPath = filepath.Join("/", TaskSecrets) +) + +// dropDirPermissions gives full access to a directory to all users and sets +// the owner to nobody. 
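+// When the process is not running as root only the mode change is applied and
+// the chown to nobody is skipped.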
+func dropDirPermissions(path string, desired os.FileMode) error { + if err := os.Chmod(path, desired|0777); err != nil { + return fmt.Errorf("Chmod(%v) failed: %v", path, err) + } + + // Can't change owner if not root. + if unix.Geteuid() != 0 { + return nil + } + + u, err := user.Lookup("nobody") + if err != nil { + return err + } + + uid, err := getUid(u) + if err != nil { + return err + } + + gid, err := getGid(u) + if err != nil { + return err + } + + if err := os.Chown(path, uid, gid); err != nil { + return fmt.Errorf("Couldn't change owner/group of %v to (uid: %v, gid: %v): %v", path, uid, gid, err) + } + + return nil +} + +// getUid for a user +func getUid(u *user.User) (int, error) { + uid, err := strconv.Atoi(u.Uid) + if err != nil { + return 0, fmt.Errorf("Unable to convert Uid to an int: %v", err) + } + + return uid, nil +} + +// getGid for a user +func getGid(u *user.User) (int, error) { + gid, err := strconv.Atoi(u.Gid) + if err != nil { + return 0, fmt.Errorf("Unable to convert Gid to an int: %v", err) + } + + return gid, nil +} + +// linkOrCopy attempts to hardlink dst to src and fallsback to copying if the +// hardlink fails. +func linkOrCopy(src, dst string, uid, gid int, perm os.FileMode) error { + // Avoid link/copy if the file already exists in the chroot + // TODO 0.6 clean this up. This was needed because chroot creation fails + // when a process restarts. + if fileInfo, _ := os.Stat(dst); fileInfo != nil { + return nil + } + // Attempt to hardlink. + if err := os.Link(src, dst); err == nil { + return nil + } + + return fileCopy(src, dst, uid, gid, perm) +} + +func getOwner(fi os.FileInfo) (int, int) { + stat, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return -1, -1 + } + return int(stat.Uid), int(stat.Gid) +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/fs_windows.go b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_windows.go new file mode 100644 index 0000000..466423e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/fs_windows.go @@ -0,0 +1,61 @@ +package allocdir + +import ( + "os" + "path/filepath" +) + +var ( + // SharedAllocContainerPath is the path inside container for mounted + // directory shared across tasks in a task group. + SharedAllocContainerPath = filepath.Join("c:\\", SharedAllocName) + + // TaskLocalContainer is the path inside a container for mounted directory + // for local storage. + TaskLocalContainerPath = filepath.Join("c:\\", TaskLocal) + + // TaskSecretsContainerPath is the path inside a container for mounted + // secrets directory + TaskSecretsContainerPath = filepath.Join("c:\\", TaskSecrets) +) + +// linkOrCopy is always copies dst to src on Windows. +func linkOrCopy(src, dst string, uid, gid int, perm os.FileMode) error { + return fileCopy(src, dst, uid, gid, perm) +} + +// The windows version does nothing currently. +func linkDir(src, dst string) error { + return nil +} + +// The windows version does nothing currently. +func unlinkDir(dir string) error { + return nil +} + +// createSecretDir creates the secrets dir folder at the given path +func createSecretDir(dir string) error { + return os.MkdirAll(dir, 0777) +} + +// removeSecretDir removes the secrets dir folder +func removeSecretDir(dir string) error { + return os.RemoveAll(dir) +} + +// The windows version does nothing currently. +func dropDirPermissions(path string, desired os.FileMode) error { + return nil +} + +// MountSpecialDirs mounts the dev and proc file system on the chroot of the +// task. It's a no-op on windows. 
+func MountSpecialDirs(taskDir string) error { + return nil +} + +// getOwner doesn't work on Windows as Windows doesn't use int user IDs +func getOwner(os.FileInfo) (int, int) { + return idUnsupported, idUnsupported +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir.go b/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir.go new file mode 100644 index 0000000..9d99f97 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir.go @@ -0,0 +1,234 @@ +package allocdir + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + hclog "github.com/hashicorp/go-hclog" +) + +// TaskDir contains all of the paths relevant to a task. All paths are on the +// host system so drivers should mount/link into task containers as necessary. +type TaskDir struct { + // AllocDir is the path to the alloc directory on the host + AllocDir string + + // Dir is the path to Task directory on the host + Dir string + + // SharedAllocDir is the path to shared alloc directory on the host + // /alloc/ + SharedAllocDir string + + // SharedTaskDir is the path to the shared alloc directory linked into + // the task directory on the host. + // /alloc/ + SharedTaskDir string + + // LocalDir is the path to the task's local directory on the host + // /local/ + LocalDir string + + // LogDir is the path to the task's log directory on the host + // /alloc/logs/ + LogDir string + + // SecretsDir is the path to secrets/ directory on the host + // /secrets/ + SecretsDir string + + logger hclog.Logger +} + +// newTaskDir creates a TaskDir struct with paths set. Call Build() to +// create paths on disk. +// +// Call AllocDir.NewTaskDir to create new TaskDirs +func newTaskDir(logger hclog.Logger, allocDir, taskName string) *TaskDir { + taskDir := filepath.Join(allocDir, taskName) + + logger = logger.Named("task_dir").With("task_name", taskName) + + return &TaskDir{ + AllocDir: allocDir, + Dir: taskDir, + SharedAllocDir: filepath.Join(allocDir, SharedAllocName), + LogDir: filepath.Join(allocDir, SharedAllocName, LogDirName), + SharedTaskDir: filepath.Join(taskDir, SharedAllocName), + LocalDir: filepath.Join(taskDir, TaskLocal), + SecretsDir: filepath.Join(taskDir, TaskSecrets), + logger: logger, + } +} + +// Copy a TaskDir. Panics if TaskDir is nil as TaskDirs should never be nil. +func (t *TaskDir) Copy() *TaskDir { + // No nested structures other than the logger which is safe to share, + // so just copy the struct + tcopy := *t + return &tcopy +} + +// Build default directories and permissions in a task directory. chrootCreated +// allows skipping chroot creation if the caller knows it has already been +// done. +func (t *TaskDir) Build(createChroot bool, chroot map[string]string) error { + if err := os.MkdirAll(t.Dir, 0777); err != nil { + return err + } + + // Make the task directory have non-root permissions. + if err := dropDirPermissions(t.Dir, os.ModePerm); err != nil { + return err + } + + // Create a local directory that each task can use. + if err := os.MkdirAll(t.LocalDir, 0777); err != nil { + return err + } + + if err := dropDirPermissions(t.LocalDir, os.ModePerm); err != nil { + return err + } + + // Create the directories that should be in every task. + for dir, perms := range TaskDirs { + absdir := filepath.Join(t.Dir, dir) + if err := os.MkdirAll(absdir, perms); err != nil { + return err + } + + if err := dropDirPermissions(absdir, perms); err != nil { + return err + } + } + + // Only link alloc dir into task dir for chroot fs isolation. 
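+ // (Here "link" means linkDir: a bind mount on Linux, a hard link elsewhere.)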
+ // Image based isolation will bind the shared alloc dir in the driver. + // If there's no isolation the task will use the host path to the + // shared alloc dir. + if createChroot { + // If the path doesn't exist OR it exists and is empty, link it + empty, _ := pathEmpty(t.SharedTaskDir) + if !pathExists(t.SharedTaskDir) || empty { + if err := linkDir(t.SharedAllocDir, t.SharedTaskDir); err != nil { + return fmt.Errorf("Failed to mount shared directory for task: %v", err) + } + } + } + + // Create the secret directory + if err := createSecretDir(t.SecretsDir); err != nil { + return err + } + + if err := dropDirPermissions(t.SecretsDir, os.ModePerm); err != nil { + return err + } + + // Build chroot if chroot filesystem isolation is going to be used + if createChroot { + if err := t.buildChroot(chroot); err != nil { + return err + } + } + + return nil +} + +// buildChroot takes a mapping of absolute directory or file paths on the host +// to their intended, relative location within the task directory. This +// attempts hardlink and then defaults to copying. If the path exists on the +// host and can't be embedded an error is returned. +func (t *TaskDir) buildChroot(entries map[string]string) error { + return t.embedDirs(entries) +} + +func (t *TaskDir) embedDirs(entries map[string]string) error { + subdirs := make(map[string]string) + for source, dest := range entries { + // Check to see if directory exists on host. + s, err := os.Stat(source) + if os.IsNotExist(err) { + continue + } + + // Embedding a single file + if !s.IsDir() { + if err := createDir(t.Dir, filepath.Dir(dest)); err != nil { + return fmt.Errorf("Couldn't create destination directory %v: %v", dest, err) + } + + // Copy the file. + taskEntry := filepath.Join(t.Dir, dest) + uid, gid := getOwner(s) + if err := linkOrCopy(source, taskEntry, uid, gid, s.Mode().Perm()); err != nil { + return err + } + + continue + } + + // Create destination directory. + destDir := filepath.Join(t.Dir, dest) + + if err := createDir(t.Dir, dest); err != nil { + return fmt.Errorf("Couldn't create destination directory %v: %v", destDir, err) + } + + // Enumerate the files in source. + dirEntries, err := ioutil.ReadDir(source) + if err != nil { + return fmt.Errorf("Couldn't read directory %v: %v", source, err) + } + + for _, entry := range dirEntries { + hostEntry := filepath.Join(source, entry.Name()) + taskEntry := filepath.Join(destDir, filepath.Base(hostEntry)) + if entry.IsDir() { + subdirs[hostEntry] = filepath.Join(dest, filepath.Base(hostEntry)) + continue + } + + // Check if entry exists. This can happen if restarting a failed + // task. + if _, err := os.Lstat(taskEntry); err == nil { + continue + } + + if !entry.Mode().IsRegular() { + // If it is a symlink we can create it, otherwise we skip it. + if entry.Mode()&os.ModeSymlink == 0 { + continue + } + + link, err := os.Readlink(hostEntry) + if err != nil { + return fmt.Errorf("Couldn't resolve symlink for %v: %v", source, err) + } + + if err := os.Symlink(link, taskEntry); err != nil { + // Symlinking twice + if err.(*os.LinkError).Err.Error() != "file exists" { + return fmt.Errorf("Couldn't create symlink: %v", err) + } + } + continue + } + + uid, gid := getOwner(entry) + if err := linkOrCopy(hostEntry, taskEntry, uid, gid, entry.Mode().Perm()); err != nil { + return err + } + } + } + + // Recurse on self to copy subdirectories. 
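+ // (subdirs maps each host subdirectory to its destination relative to the
+ // task dir, so every level of recursion preserves the same relative layout
+ // under t.Dir.)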
+ if len(subdirs) != 0 { + return t.embedDirs(subdirs) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir_linux.go b/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir_linux.go new file mode 100644 index 0000000..9bb57bd --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir_linux.go @@ -0,0 +1,36 @@ +package allocdir + +import ( + "fmt" + "os" + "path/filepath" + + multierror "github.com/hashicorp/go-multierror" +) + +// unmountSpecialDirs unmounts the dev and proc file system from the chroot. No +// error is returned if the directories do not exist or have already been +// unmounted. +func (t *TaskDir) unmountSpecialDirs() error { + errs := new(multierror.Error) + dev := filepath.Join(t.Dir, "dev") + if pathExists(dev) { + if err := unlinkDir(dev); err != nil { + errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev %q: %v", dev, err)) + } else if err := os.RemoveAll(dev); err != nil { + errs = multierror.Append(errs, fmt.Errorf("Failed to delete dev directory %q: %v", dev, err)) + } + } + + // Unmount proc. + proc := filepath.Join(t.Dir, "proc") + if pathExists(proc) { + if err := unlinkDir(proc); err != nil { + errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc %q: %v", proc, err)) + } else if err := os.RemoveAll(proc); err != nil { + errs = multierror.Append(errs, fmt.Errorf("Failed to delete proc directory %q: %v", dev, err)) + } + } + + return errs.ErrorOrNil() +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir_nonlinux.go b/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir_nonlinux.go new file mode 100644 index 0000000..08e7ba6 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/task_dir_nonlinux.go @@ -0,0 +1,8 @@ +// +build !linux + +package allocdir + +// currently a noop on non-Linux platforms +func (d *TaskDir) unmountSpecialDirs() error { + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/stats/cpu.go b/vendor/github.com/hashicorp/nomad/client/stats/cpu.go new file mode 100644 index 0000000..47a9ee3 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/stats/cpu.go @@ -0,0 +1,61 @@ +package stats + +import ( + "runtime" + "time" + + shelpers "github.com/hashicorp/nomad/helper/stats" +) + +// CpuStats calculates cpu usage percentage +type CpuStats struct { + prevCpuTime float64 + prevTime time.Time + + totalCpus int +} + +// NewCpuStats returns a cpu stats calculator +func NewCpuStats() *CpuStats { + numCpus := runtime.NumCPU() + cpuStats := &CpuStats{ + totalCpus: numCpus, + } + return cpuStats +} + +// Percent calculates the cpu usage percentage based on the current cpu usage +// and the previous cpu usage where usage is given as time in nanoseconds spend +// in the cpu +func (c *CpuStats) Percent(cpuTime float64) float64 { + now := time.Now() + + if c.prevCpuTime == 0.0 { + // invoked first time + c.prevCpuTime = cpuTime + c.prevTime = now + return 0.0 + } + + timeDelta := now.Sub(c.prevTime).Nanoseconds() + ret := c.calculatePercent(c.prevCpuTime, cpuTime, timeDelta) + c.prevCpuTime = cpuTime + c.prevTime = now + return ret +} + +// TicksConsumed calculates the total ticks consumes by the process across all +// cpu cores +func (c *CpuStats) TicksConsumed(percent float64) float64 { + return (percent / 100) * shelpers.TotalTicksAvailable() / float64(c.totalCpus) +} + +func (c *CpuStats) calculatePercent(t1, t2 float64, timeDelta int64) float64 { + vDelta := t2 - t1 + if timeDelta <= 0 || vDelta <= 
0.0 { + return 0.0 + } + + overall_percent := (vDelta / float64(timeDelta)) * 100.0 + return overall_percent +} diff --git a/vendor/github.com/hashicorp/nomad/client/stats/cpu_unix.go b/vendor/github.com/hashicorp/nomad/client/stats/cpu_unix.go new file mode 100644 index 0000000..3416cd4 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/stats/cpu_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package stats + +import ( + shelpers "github.com/hashicorp/nomad/helper/stats" + "github.com/shirou/gopsutil/cpu" +) + +func (h *HostStatsCollector) collectCPUStats() (cpus []*CPUStats, totalTicks float64, err error) { + + ticksConsumed := 0.0 + cpuStats, err := cpu.Times(true) + if err != nil { + return nil, 0.0, err + } + cs := make([]*CPUStats, len(cpuStats)) + for idx, cpuStat := range cpuStats { + percentCalculator, ok := h.statsCalculator[cpuStat.CPU] + if !ok { + percentCalculator = NewHostCpuStatsCalculator() + h.statsCalculator[cpuStat.CPU] = percentCalculator + } + idle, user, system, total := percentCalculator.Calculate(cpuStat) + cs[idx] = &CPUStats{ + CPU: cpuStat.CPU, + User: user, + System: system, + Idle: idle, + Total: total, + } + ticksConsumed += (total / 100.0) * (shelpers.TotalTicksAvailable() / float64(len(cpuStats))) + } + + return cs, ticksConsumed, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/stats/cpu_windows.go b/vendor/github.com/hashicorp/nomad/client/stats/cpu_windows.go new file mode 100644 index 0000000..64b59c0 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/stats/cpu_windows.go @@ -0,0 +1,53 @@ +// +build windows + +package stats + +import ( + "fmt" + + shelpers "github.com/hashicorp/nomad/helper/stats" + "github.com/shirou/gopsutil/cpu" +) + +func (h *HostStatsCollector) collectCPUStats() (cpus []*CPUStats, totalTicks float64, err error) { + // Get the per cpu stats + cpuStats, err := cpu.Times(true) + if err != nil { + return nil, 0.0, err + } + + cs := make([]*CPUStats, len(cpuStats)) + for idx, cpuStat := range cpuStats { + + // On windows they are already in percent + cs[idx] = &CPUStats{ + CPU: cpuStat.CPU, + User: cpuStat.User, + System: cpuStat.System, + Idle: cpuStat.Idle, + Total: cpuStat.Total(), + } + } + + // Get the number of ticks + allCpu, err := cpu.Times(false) + if err != nil { + return nil, 0.0, err + } + if len(allCpu) != 1 { + return nil, 0.0, fmt.Errorf("unexpected number of cpus (%d)", len(allCpu)) + } + + // We use the calculator because when retrieving against all cpus it is + // returned as ticks. 
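+ // (Calculate turns the aggregate times into a busy percentage; multiplying
+ // by TotalTicksAvailable()/100 then expresses that as consumed ticks, the
+ // same quantity the unix collector accumulates per CPU.)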
+ all := allCpu[0] + percentCalculator, ok := h.statsCalculator[all.CPU] + if !ok { + percentCalculator = NewHostCpuStatsCalculator() + h.statsCalculator[all.CPU] = percentCalculator + } + _, _, _, total := percentCalculator.Calculate(all) + ticks := (total / 100) * shelpers.TotalTicksAvailable() + + return cs, ticks, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/stats/host.go b/vendor/github.com/hashicorp/nomad/client/stats/host.go new file mode 100644 index 0000000..28d6190 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/stats/host.go @@ -0,0 +1,301 @@ +package stats + +import ( + "fmt" + "math" + "runtime" + "sync" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/plugins/device" + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/mem" +) + +// HostStats represents resource usage stats of the host running a Nomad client +type HostStats struct { + Memory *MemoryStats + CPU []*CPUStats + DiskStats []*DiskStats + AllocDirStats *DiskStats + DeviceStats []*DeviceGroupStats + Uptime uint64 + Timestamp int64 + CPUTicksConsumed float64 +} + +// MemoryStats represents stats related to virtual memory usage +type MemoryStats struct { + Total uint64 + Available uint64 + Used uint64 + Free uint64 +} + +// CPUStats represents stats related to cpu usage +type CPUStats struct { + CPU string + User float64 + System float64 + Idle float64 + Total float64 +} + +// DiskStats represents stats related to disk usage +type DiskStats struct { + Device string + Mountpoint string + Size uint64 + Used uint64 + Available uint64 + UsedPercent float64 + InodesUsedPercent float64 +} + +// DeviceGroupStats represents stats related to device group +type DeviceGroupStats = device.DeviceGroupStats + +// DeviceStatsCollector is used to retrieve all the latest statistics for all devices. +type DeviceStatsCollector func() []*DeviceGroupStats + +// NodeStatsCollector is an interface which is used for the purposes of mocking +// the HostStatsCollector in the tests +type NodeStatsCollector interface { + Collect() error + Stats() *HostStats +} + +// HostStatsCollector collects host resource usage stats +type HostStatsCollector struct { + numCores int + statsCalculator map[string]*HostCpuStatsCalculator + hostStats *HostStats + hostStatsLock sync.RWMutex + allocDir string + deviceStatsCollector DeviceStatsCollector + + // badParts is a set of partitions whose usage cannot be read; used to + // squelch logspam. + badParts map[string]struct{} + + logger hclog.Logger +} + +// NewHostStatsCollector returns a HostStatsCollector. 
The allocDir is passed in +// so that we can present the disk related statistics for the mountpoint where +// the allocation directory lives +func NewHostStatsCollector(logger hclog.Logger, allocDir string, deviceStatsCollector DeviceStatsCollector) *HostStatsCollector { + logger = logger.Named("host_stats") + numCores := runtime.NumCPU() + statsCalculator := make(map[string]*HostCpuStatsCalculator) + collector := &HostStatsCollector{ + statsCalculator: statsCalculator, + numCores: numCores, + logger: logger, + allocDir: allocDir, + badParts: make(map[string]struct{}), + deviceStatsCollector: deviceStatsCollector, + } + return collector +} + +// Collect collects stats related to resource usage of a host +func (h *HostStatsCollector) Collect() error { + h.hostStatsLock.Lock() + defer h.hostStatsLock.Unlock() + return h.collectLocked() +} + +// collectLocked collects stats related to resource usage of the host but should +// be called with the lock held. +func (h *HostStatsCollector) collectLocked() error { + hs := &HostStats{Timestamp: time.Now().UTC().UnixNano()} + + // Determine up-time + uptime, err := host.Uptime() + if err != nil { + return err + } + hs.Uptime = uptime + + // Collect memory stats + mstats, err := h.collectMemoryStats() + if err != nil { + return err + } + hs.Memory = mstats + + // Collect cpu stats + cpus, ticks, err := h.collectCPUStats() + if err != nil { + return err + } + hs.CPU = cpus + hs.CPUTicksConsumed = ticks + + // Collect disk stats + diskStats, err := h.collectDiskStats() + if err != nil { + return err + } + hs.DiskStats = diskStats + + // Getting the disk stats for the allocation directory + usage, err := disk.Usage(h.allocDir) + if err != nil { + return fmt.Errorf("failed to find disk usage of alloc_dir %q: %v", h.allocDir, err) + } + hs.AllocDirStats = h.toDiskStats(usage, nil) + + // Collect devices stats + deviceStats := h.collectDeviceGroupStats() + hs.DeviceStats = deviceStats + + // Update the collected status object. 
+ h.hostStats = hs + + return nil +} + +func (h *HostStatsCollector) collectMemoryStats() (*MemoryStats, error) { + memStats, err := mem.VirtualMemory() + if err != nil { + return nil, err + } + mem := &MemoryStats{ + Total: memStats.Total, + Available: memStats.Available, + Used: memStats.Used, + Free: memStats.Free, + } + + return mem, nil +} + +func (h *HostStatsCollector) collectDiskStats() ([]*DiskStats, error) { + partitions, err := disk.Partitions(false) + if err != nil { + return nil, err + } + + var diskStats []*DiskStats + for _, partition := range partitions { + usage, err := disk.Usage(partition.Mountpoint) + if err != nil { + if _, ok := h.badParts[partition.Mountpoint]; ok { + // already known bad, don't log again + continue + } + + h.badParts[partition.Mountpoint] = struct{}{} + h.logger.Warn("error fetching host disk usage stats", "error", err, "partition", partition.Mountpoint) + continue + } + delete(h.badParts, partition.Mountpoint) + + ds := h.toDiskStats(usage, &partition) + diskStats = append(diskStats, ds) + } + + return diskStats, nil +} + +func (h *HostStatsCollector) collectDeviceGroupStats() []*DeviceGroupStats { + if h.deviceStatsCollector == nil { + return []*DeviceGroupStats{} + } + + return h.deviceStatsCollector() +} + +// Stats returns the host stats that has been collected +func (h *HostStatsCollector) Stats() *HostStats { + h.hostStatsLock.RLock() + defer h.hostStatsLock.RUnlock() + + if h.hostStats == nil { + if err := h.collectLocked(); err != nil { + h.logger.Warn("error fetching host resource usage stats", "error", err) + } + } + + return h.hostStats +} + +// toDiskStats merges UsageStat and PartitionStat to create a DiskStat +func (h *HostStatsCollector) toDiskStats(usage *disk.UsageStat, partitionStat *disk.PartitionStat) *DiskStats { + ds := DiskStats{ + Size: usage.Total, + Used: usage.Used, + Available: usage.Free, + UsedPercent: usage.UsedPercent, + InodesUsedPercent: usage.InodesUsedPercent, + } + if math.IsNaN(ds.UsedPercent) { + ds.UsedPercent = 0.0 + } + if math.IsNaN(ds.InodesUsedPercent) { + ds.InodesUsedPercent = 0.0 + } + + if partitionStat != nil { + ds.Device = partitionStat.Device + ds.Mountpoint = partitionStat.Mountpoint + } + + return &ds +} + +// HostCpuStatsCalculator calculates cpu usage percentages +type HostCpuStatsCalculator struct { + prevIdle float64 + prevUser float64 + prevSystem float64 + prevBusy float64 + prevTotal float64 +} + +// NewHostCpuStatsCalculator returns a HostCpuStatsCalculator +func NewHostCpuStatsCalculator() *HostCpuStatsCalculator { + return &HostCpuStatsCalculator{} +} + +// Calculate calculates the current cpu usage percentages +func (h *HostCpuStatsCalculator) Calculate(times cpu.TimesStat) (idle float64, user float64, system float64, total float64) { + currentIdle := times.Idle + currentUser := times.User + currentSystem := times.System + currentTotal := times.Total() + currentBusy := times.User + times.System + times.Nice + times.Iowait + times.Irq + + times.Softirq + times.Steal + times.Guest + times.GuestNice + times.Stolen + + deltaTotal := currentTotal - h.prevTotal + idle = ((currentIdle - h.prevIdle) / deltaTotal) * 100 + user = ((currentUser - h.prevUser) / deltaTotal) * 100 + system = ((currentSystem - h.prevSystem) / deltaTotal) * 100 + total = ((currentBusy - h.prevBusy) / deltaTotal) * 100 + + // Protect against any invalid values + if math.IsNaN(idle) || math.IsInf(idle, 0) { + idle = 100.0 + } + if math.IsNaN(user) || math.IsInf(user, 0) { + user = 0.0 + } + if math.IsNaN(system) || 
math.IsInf(system, 0) { + system = 0.0 + } + if math.IsNaN(total) || math.IsInf(total, 0) { + total = 0.0 + } + + h.prevIdle = currentIdle + h.prevUser = currentUser + h.prevSystem = currentSystem + h.prevTotal = currentTotal + h.prevBusy = currentBusy + return +} diff --git a/vendor/github.com/hashicorp/nomad/client/structs/broadcaster.go b/vendor/github.com/hashicorp/nomad/client/structs/broadcaster.go new file mode 100644 index 0000000..5557796 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/structs/broadcaster.go @@ -0,0 +1,171 @@ +package structs + +import ( + "errors" + "sync" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/nomad/structs" +) + +const ( + // listenerCap is the capacity of the listener chans. Must be exactly 1 + // to prevent Sends from blocking and allows them to pop old pending + // updates from the chan before enqueueing the latest update. + listenerCap = 1 +) + +var ErrAllocBroadcasterClosed = errors.New("alloc broadcaster closed") + +// AllocBroadcaster implements an allocation broadcast channel where each +// listener receives allocation updates. Pending updates are dropped and +// replaced by newer allocation updates, so listeners may not receive every +// allocation update. However this ensures Sends never block and listeners only +// receive the latest allocation update -- never a stale version. +type AllocBroadcaster struct { + mu sync.Mutex + + // listeners is a map of unique ids to listener chans. lazily + // initialized on first listen + listeners map[int]chan *structs.Allocation + + // nextId is the next id to assign in listener map + nextId int + + // closed is true if broadcsater is closed + closed bool + + // last alloc sent to prime new listeners + last *structs.Allocation + + logger hclog.Logger +} + +// NewAllocBroadcaster returns a new AllocBroadcaster. +func NewAllocBroadcaster(l hclog.Logger) *AllocBroadcaster { + return &AllocBroadcaster{ + logger: l, + } +} + +// Send broadcasts an allocation update. Any pending updates are replaced with +// this version of the allocation to prevent blocking on slow receivers. +// Returns ErrAllocBroadcasterClosed if called after broadcaster is closed. +func (b *AllocBroadcaster) Send(v *structs.Allocation) error { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return ErrAllocBroadcasterClosed + } + + b.logger.Trace("sending updated alloc", + "client_status", v.ClientStatus, + "desired_status", v.DesiredStatus, + ) + + // Store last sent alloc for future listeners + b.last = v + + // Send alloc to already created listeners + for _, l := range b.listeners { + select { + case l <- v: + case <-l: + // Pop pending update and replace with new update + l <- v + } + } + + return nil +} + +// Close closes the channel, disabling the sending of further allocation +// updates. Pending updates are still received by listeners. Safe to call +// concurrently and more than once. +func (b *AllocBroadcaster) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + + // Close all listener chans + for _, l := range b.listeners { + close(l) + } + + // Clear all references and mark broadcaster as closed + b.listeners = nil + b.closed = true +} + +// stop an individual listener +func (b *AllocBroadcaster) stop(id int) { + b.mu.Lock() + defer b.mu.Unlock() + + // If broadcaster has been closed there's nothing more to do. + if b.closed { + return + } + + l, ok := b.listeners[id] + if !ok { + // If this listener has been stopped already there's nothing + // more to do. 
+ return + } + + close(l) + delete(b.listeners, id) +} + +// Listen returns a Listener for the broadcast channel. New listeners receive +// the last sent alloc update. +func (b *AllocBroadcaster) Listen() *AllocListener { + b.mu.Lock() + defer b.mu.Unlock() + if b.listeners == nil { + b.listeners = make(map[int]chan *structs.Allocation) + } + + for b.listeners[b.nextId] != nil { + b.nextId++ + } + + ch := make(chan *structs.Allocation, listenerCap) + + // Send last update if there was one + if b.last != nil { + ch <- b.last + } + + // Broadcaster is already closed, close this listener. Must be done + // after the last update was sent. + if b.closed { + close(ch) + } + + b.listeners[b.nextId] = ch + + return &AllocListener{ch, b, b.nextId} +} + +// AllocListener implements a listening endpoint for an allocation broadcast +// channel. +type AllocListener struct { + // ch receives the broadcast messages. + ch <-chan *structs.Allocation + b *AllocBroadcaster + id int +} + +func (l *AllocListener) Ch() <-chan *structs.Allocation { + return l.ch +} + +// Close closes the Listener, disabling the receival of further messages. Safe +// to call more than once and concurrently with receiving on Ch. +func (l *AllocListener) Close() { + l.b.stop(l.id) +} diff --git a/vendor/github.com/hashicorp/nomad/client/structs/structs.generated.go b/vendor/github.com/hashicorp/nomad/client/structs/structs.generated.go new file mode 100644 index 0000000..b69f453 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/structs/structs.generated.go @@ -0,0 +1,9373 @@ +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package structs + +import ( + "errors" + "fmt" + pkg1_stats "github.com/hashicorp/nomad/client/stats" + pkg4_structs "github.com/hashicorp/nomad/nomad/structs" + pkg2_device "github.com/hashicorp/nomad/plugins/device" + pkg3_structs "github.com/hashicorp/nomad/plugins/shared/structs" + codec1978 "github.com/ugorji/go/codec" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF8102 = 1 + codecSelferC_RAW102 = 0 + // ----- value types used ---- + codecSelferValueTypeArray102 = 10 + codecSelferValueTypeMap102 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey102 = 2 + codecSelfer_containerMapValue102 = 3 + codecSelfer_containerMapEnd102 = 4 + codecSelfer_containerArrayElem102 = 6 + codecSelfer_containerArrayEnd102 = 7 +) + +var ( + codecSelferBitsize102 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr102 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer102 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_stats.HostStats + var v1 pkg4_structs.QueryMeta + var v2 pkg2_device.DeviceGroupStats + var v3 pkg3_structs.StatValue + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *RpcError) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Message)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Message")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Message)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Code == nil { + r.EncodeNil() + } else { + yy7 := *x.Code + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Code")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Code == nil { + r.EncodeNil() + } else { + yy9 := *x.Code + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *RpcError) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *RpcError) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + 
z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "Message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv4 := &x.Message + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Code": + if r.TryDecodeAsNil() { + if x.Code != nil { + x.Code = nil + } + } else { + if x.Code == nil { + x.Code = new(int64) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(x.Code)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *RpcError) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv9 := &x.Message + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.Code != nil { + x.Code = nil + } + } else { + if x.Code == nil { + x.Code = new(int64) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(x.Code)) = int64(r.DecodeInt(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *ClientStatsResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.HostStats == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.HostStats) { + } else { + z.EncFallback(x.HostStats) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("HostStats")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.HostStats == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.HostStats) { + } else { + z.EncFallback(x.HostStats) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + 
} + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Index")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("LastContact")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("KnownLeader")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *ClientStatsResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *ClientStatsResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "HostStats": + if r.TryDecodeAsNil() { + if x.HostStats != nil { + x.HostStats = nil + } + } else { + if x.HostStats == nil { + x.HostStats = new(pkg1_stats.HostStats) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.HostStats) { + } else { + z.DecFallback(x.HostStats, false) + } + } + case "Index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv6 := &x.Index + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "LastContact": + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv8 := 
&x.LastContact + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + *((*int64)(yyv8)) = int64(r.DecodeInt(64)) + } + } + case "KnownLeader": + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv10 := &x.KnownLeader + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *ClientStatsResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.HostStats != nil { + x.HostStats = nil + } + } else { + if x.HostStats == nil { + x.HostStats = new(pkg1_stats.HostStats) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.HostStats) { + } else { + z.DecFallback(x.HostStats, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv15 := &x.Index + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*uint64)(yyv15)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv17 := &x.LastContact + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + *((*int64)(yyv17)) = int64(r.DecodeInt(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv19 := &x.KnownLeader + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *AllocFileInfo) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) 
+ yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Name")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.IsDir)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("IsDir")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.IsDir)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Size)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Size")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Size)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.FileMode)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("FileMode")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.FileMode)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yy16 := &x.ModTime + yym17 := z.EncBinary() + _ = yym17 + if false { + } else if yym18 := z.TimeRtidIfBinc(); yym18 != 0 { + r.EncodeBuiltin(yym18, yy16) + } else if z.HasExtensions() && z.EncExt(yy16) { + } else if yym17 { + z.EncBinaryMarshal(yy16) + } else if !yym17 && z.IsJSONHandle() { + z.EncJSONMarshal(yy16) + } else { + z.EncFallback(yy16) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("ModTime")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yy19 := &x.ModTime + yym20 := z.EncBinary() + _ = yym20 + if false { + } else if yym21 := z.TimeRtidIfBinc(); yym21 != 0 { + r.EncodeBuiltin(yym21, yy19) + } else if z.HasExtensions() && z.EncExt(yy19) { + } else if yym20 { + z.EncBinaryMarshal(yy19) + } else if !yym20 && z.IsJSONHandle() { + z.EncJSONMarshal(yy19) + } else { + z.EncFallback(yy19) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *AllocFileInfo) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } 
else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *AllocFileInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "Name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "IsDir": + if r.TryDecodeAsNil() { + x.IsDir = false + } else { + yyv6 := &x.IsDir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "Size": + if r.TryDecodeAsNil() { + x.Size = 0 + } else { + yyv8 := &x.Size + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(yyv8)) = int64(r.DecodeInt(64)) + } + } + case "FileMode": + if r.TryDecodeAsNil() { + x.FileMode = "" + } else { + yyv10 := &x.FileMode + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "ModTime": + if r.TryDecodeAsNil() { + x.ModTime = time.Time{} + } else { + yyv12 := &x.ModTime + yym13 := z.DecBinary() + _ = yym13 + if false { + } else if yym14 := z.TimeRtidIfBinc(); yym14 != 0 { + r.DecodeBuiltin(yym14, yyv12) + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else if yym13 { + z.DecBinaryUnmarshal(yyv12) + } else if !yym13 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv12) + } else { + z.DecFallback(yyv12, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *AllocFileInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv16 := &x.Name + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.IsDir = false + } else { + yyv18 := &x.IsDir + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Size = 0 + } else { + yyv20 := &x.Size + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int64)(yyv20)) = int64(r.DecodeInt(64)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.FileMode = "" + } else { + yyv22 := &x.FileMode + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.ModTime = time.Time{} + } else { + yyv24 := &x.ModTime + yym25 := z.DecBinary() + _ = yym25 + if false { + } else if yym26 := z.TimeRtidIfBinc(); yym26 != 0 { + r.DecodeBuiltin(yym26, yyv24) + } else if z.HasExtensions() && z.DecExt(yyv24) { + } else if yym25 { + z.DecBinaryUnmarshal(yyv24) + } else if !yym25 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv24) + } else { + z.DecFallback(yyv24, false) + } + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *FsListRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 10 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllocID")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Path")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) 
+ r.EncodeString(codecSelferC_UTF8102, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MinQueryIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MaxQueryTime")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllowStale")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Prefix")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + 
r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *FsListRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *FsListRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "AllocID": + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv4 := &x.AllocID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv8 := &x.Region + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "MinQueryIndex": + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv12 := &x.MinQueryIndex + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "MaxQueryTime": + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv14 := &x.MaxQueryTime + yym15 := z.DecBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.DecExt(yyv14) { + } else { + *((*int64)(yyv14)) = int64(r.DecodeInt(64)) + } + } + case "AllowStale": + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv16 := &x.AllowStale + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(yyv16)) = r.DecodeBool() + } + } + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv18 := &x.Prefix + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = 
r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv20 := &x.AuthToken + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv22 := &x.Forwarded + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(yyv22)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *FsListRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv25 := &x.AllocID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv27 := &x.Path + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv29 := &x.Region + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv31 := &x.Namespace + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv33 := &x.MinQueryIndex + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*uint64)(yyv33)) = uint64(r.DecodeUint(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv35 := &x.MaxQueryTime + yym36 := z.DecBinary() + _ = yym36 + if false { + } else if z.HasExtensions() && z.DecExt(yyv35) { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv37 := &x.AllowStale + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*bool)(yyv37)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv39 := &x.Prefix + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv41 := &x.AuthToken + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv43 := &x.Forwarded + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*bool)(yyv43)) = r.DecodeBool() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *FsListResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Files == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePtrtoAllocFileInfo(([]*AllocFileInfo)(x.Files), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Files")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Files == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePtrtoAllocFileInfo(([]*AllocFileInfo)(x.Files), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Index")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("LastContact")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("KnownLeader")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *FsListResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *FsListResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "Files": + if r.TryDecodeAsNil() { + x.Files = nil + } else { + yyv4 := &x.Files + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePtrtoAllocFileInfo((*[]*AllocFileInfo)(yyv4), d) + } + } + case "Index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv6 := &x.Index + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "LastContact": + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv8 := &x.LastContact + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + *((*int64)(yyv8)) = int64(r.DecodeInt(64)) + } + } + case "KnownLeader": + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv10 := &x.KnownLeader + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + 
default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *FsListResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Files = nil + } else { + yyv13 := &x.Files + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePtrtoAllocFileInfo((*[]*AllocFileInfo)(yyv13), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv15 := &x.Index + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*uint64)(yyv15)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv17 := &x.LastContact + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + *((*int64)(yyv17)) = int64(r.DecodeInt(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv19 := &x.KnownLeader + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *FsStatRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 10 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllocID")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, 
string(x.AllocID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Path")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MinQueryIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MaxQueryTime")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllowStale")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Prefix")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym26 
:= z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *FsStatRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *FsStatRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "AllocID": + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv4 := &x.AllocID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv8 := &x.Region + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "MinQueryIndex": + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv12 := 
&x.MinQueryIndex + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "MaxQueryTime": + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv14 := &x.MaxQueryTime + yym15 := z.DecBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.DecExt(yyv14) { + } else { + *((*int64)(yyv14)) = int64(r.DecodeInt(64)) + } + } + case "AllowStale": + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv16 := &x.AllowStale + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(yyv16)) = r.DecodeBool() + } + } + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv18 := &x.Prefix + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv20 := &x.AuthToken + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv22 := &x.Forwarded + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(yyv22)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *FsStatRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv25 := &x.AllocID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv27 := &x.Path + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv29 := &x.Region + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv31 := &x.Namespace + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + 
x.MinQueryIndex = 0 + } else { + yyv33 := &x.MinQueryIndex + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*uint64)(yyv33)) = uint64(r.DecodeUint(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv35 := &x.MaxQueryTime + yym36 := z.DecBinary() + _ = yym36 + if false { + } else if z.HasExtensions() && z.DecExt(yyv35) { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv37 := &x.AllowStale + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*bool)(yyv37)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv39 := &x.Prefix + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv41 := &x.AuthToken + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv43 := &x.Forwarded + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*bool)(yyv43)) = r.DecodeBool() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *FsStatResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Info == nil { + r.EncodeNil() + } else { + x.Info.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Info")) + 
z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Info == nil { + r.EncodeNil() + } else { + x.Info.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Index")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("LastContact")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("KnownLeader")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *FsStatResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *FsStatResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "Info": + if r.TryDecodeAsNil() { + if x.Info != nil { + x.Info = nil + } + } else { + if x.Info == nil { + x.Info = new(AllocFileInfo) + } + x.Info.CodecDecodeSelf(d) + } + case "Index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv5 := &x.Index + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + 
*((*uint64)(yyv5)) = uint64(r.DecodeUint(64)) + } + } + case "LastContact": + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv7 := &x.LastContact + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + case "KnownLeader": + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv9 := &x.KnownLeader + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *FsStatResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.Info != nil { + x.Info = nil + } + } else { + if x.Info == nil { + x.Info = new(AllocFileInfo) + } + x.Info.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv13 := &x.Index + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv15 := &x.LastContact + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv17 := &x.KnownLeader + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *FsStreamRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [15]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(15) + } else { + yynn2 = 15 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } 
+ if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllocID")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Path")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Offset)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Offset")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Offset)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Origin)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Origin")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Origin)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.PlainText)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("PlainText")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.PlainText)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.Limit)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Limit")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.Limit)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Follow")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym25 := z.EncBinary() + _ = yym25 + if false { + } 
else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MinQueryIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym34 := z.EncBinary() + _ = yym34 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MaxQueryTime")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym35 := z.EncBinary() + _ = yym35 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllowStale")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym40 := z.EncBinary() + _ = yym40 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Prefix")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym43 := z.EncBinary() + _ = yym43 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *FsStreamRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *FsStreamRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "AllocID": + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv4 := &x.AllocID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Offset": + if r.TryDecodeAsNil() { + x.Offset = 0 + } else { + yyv8 := &x.Offset + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(yyv8)) = int64(r.DecodeInt(64)) + } + } + case "Origin": + if r.TryDecodeAsNil() { + x.Origin = "" + } else { + yyv10 := &x.Origin + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "PlainText": + if r.TryDecodeAsNil() { + x.PlainText = false + } else { + yyv12 := &x.PlainText + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "Limit": + if r.TryDecodeAsNil() { + x.Limit = 0 + } else { + yyv14 := &x.Limit + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int64)(yyv14)) = int64(r.DecodeInt(64)) + } + } + case "Follow": + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv16 := &x.Follow + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(yyv16)) = r.DecodeBool() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv18 := &x.Region + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + 
*((*string)(yyv18)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv20 := &x.Namespace + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "MinQueryIndex": + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv22 := &x.MinQueryIndex + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*uint64)(yyv22)) = uint64(r.DecodeUint(64)) + } + } + case "MaxQueryTime": + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv24 := &x.MaxQueryTime + yym25 := z.DecBinary() + _ = yym25 + if false { + } else if z.HasExtensions() && z.DecExt(yyv24) { + } else { + *((*int64)(yyv24)) = int64(r.DecodeInt(64)) + } + } + case "AllowStale": + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv26 := &x.AllowStale + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } + } + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv28 := &x.Prefix + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv30 := &x.AuthToken + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv32 := &x.Forwarded + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*bool)(yyv32)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *FsStreamRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj34 int + var yyb34 bool + var yyhl34 bool = l >= 0 + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv35 := &x.AllocID + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv37 := &x.Path + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Offset = 0 + } else { + yyv39 := &x.Offset + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*int64)(yyv39)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Origin = "" + } else { + yyv41 := &x.Origin + yym42 := z.DecBinary() + _ = 
yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.PlainText = false + } else { + yyv43 := &x.PlainText + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*bool)(yyv43)) = r.DecodeBool() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Limit = 0 + } else { + yyv45 := &x.Limit + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*int64)(yyv45)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv47 := &x.Follow + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + *((*bool)(yyv47)) = r.DecodeBool() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv49 := &x.Region + yym50 := z.DecBinary() + _ = yym50 + if false { + } else { + *((*string)(yyv49)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv51 := &x.Namespace + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + *((*string)(yyv51)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv53 := &x.MinQueryIndex + yym54 := z.DecBinary() + _ = yym54 + if false { + } else { + *((*uint64)(yyv53)) = uint64(r.DecodeUint(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv55 := &x.MaxQueryTime + yym56 := z.DecBinary() + _ = yym56 + if false { + } else if z.HasExtensions() && z.DecExt(yyv55) { + } else { + *((*int64)(yyv55)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv57 := &x.AllowStale + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + *((*bool)(yyv57)) = r.DecodeBool() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } 
else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv59 := &x.Prefix + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + *((*string)(yyv59)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv61 := &x.AuthToken + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + *((*string)(yyv61)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv63 := &x.Forwarded + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*bool)(yyv63)) = r.DecodeBool() + } + } + for { + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj34-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *FsLogsRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [15]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(15) + } else { + yynn2 = 15 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllocID")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Task)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Task")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Task)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.LogType)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("LogType")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + 
yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.LogType)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.Offset)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Offset")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.Offset)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Origin)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Origin")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Origin)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.PlainText)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("PlainText")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.PlainText)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Follow")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MinQueryIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + 
r.EncodeUint(uint64(x.MinQueryIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym34 := z.EncBinary() + _ = yym34 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MaxQueryTime")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym35 := z.EncBinary() + _ = yym35 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllowStale")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym40 := z.EncBinary() + _ = yym40 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Prefix")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym43 := z.EncBinary() + _ = yym43 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *FsLogsRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *FsLogsRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "AllocID": + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv4 := &x.AllocID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Task": + if r.TryDecodeAsNil() { + x.Task = "" + } else { + yyv6 := &x.Task + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "LogType": + if r.TryDecodeAsNil() { + x.LogType = "" + } else { + yyv8 := &x.LogType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "Offset": + if r.TryDecodeAsNil() { + x.Offset = 0 + } else { + yyv10 := &x.Offset + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + } + } + case "Origin": + if r.TryDecodeAsNil() { + x.Origin = "" + } else { + yyv12 := &x.Origin + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "PlainText": + if r.TryDecodeAsNil() { + x.PlainText = false + } else { + yyv14 := &x.PlainText + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "Follow": + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv16 := &x.Follow + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(yyv16)) = r.DecodeBool() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv18 := &x.Region + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv20 := &x.Namespace + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "MinQueryIndex": + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv22 := &x.MinQueryIndex + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*uint64)(yyv22)) = uint64(r.DecodeUint(64)) + } + } + case "MaxQueryTime": + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv24 := &x.MaxQueryTime + yym25 := z.DecBinary() + _ = yym25 + if false { + } else if z.HasExtensions() && z.DecExt(yyv24) { + } else { + *((*int64)(yyv24)) = int64(r.DecodeInt(64)) + } + } + case "AllowStale": + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv26 := &x.AllowStale + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } + } + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv28 := &x.Prefix + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv30 := &x.AuthToken + yym31 := z.DecBinary() + _ = yym31 + if false { + } else 
{ + *((*string)(yyv30)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv32 := &x.Forwarded + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*bool)(yyv32)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *FsLogsRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj34 int + var yyb34 bool + var yyhl34 bool = l >= 0 + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv35 := &x.AllocID + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Task = "" + } else { + yyv37 := &x.Task + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.LogType = "" + } else { + yyv39 := &x.LogType + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Offset = 0 + } else { + yyv41 := &x.Offset + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int64)(yyv41)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Origin = "" + } else { + yyv43 := &x.Origin + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.PlainText = false + } else { + yyv45 := &x.PlainText + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*bool)(yyv45)) = r.DecodeBool() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv47 := &x.Follow + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + *((*bool)(yyv47)) = r.DecodeBool() + } + } + 
yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv49 := &x.Region + yym50 := z.DecBinary() + _ = yym50 + if false { + } else { + *((*string)(yyv49)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv51 := &x.Namespace + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + *((*string)(yyv51)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv53 := &x.MinQueryIndex + yym54 := z.DecBinary() + _ = yym54 + if false { + } else { + *((*uint64)(yyv53)) = uint64(r.DecodeUint(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv55 := &x.MaxQueryTime + yym56 := z.DecBinary() + _ = yym56 + if false { + } else if z.HasExtensions() && z.DecExt(yyv55) { + } else { + *((*int64)(yyv55)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv57 := &x.AllowStale + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + *((*bool)(yyv57)) = r.DecodeBool() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv59 := &x.Prefix + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + *((*string)(yyv59)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv61 := &x.AuthToken + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + *((*string)(yyv61)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv63 := &x.Forwarded + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*bool)(yyv63)) = r.DecodeBool() + } + } + for { + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj34-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *StreamErrWrapper) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Error == nil { + r.EncodeNil() + } else { + x.Error.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Error")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Error == nil { + r.EncodeNil() + } else { + x.Error.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Payload == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW102, []byte(x.Payload)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Payload")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Payload == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW102, []byte(x.Payload)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *StreamErrWrapper) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *StreamErrWrapper) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "Error": + if r.TryDecodeAsNil() { + if x.Error != nil { + x.Error = nil + } + } else { + if x.Error == nil { + x.Error = 
new(RpcError) + } + x.Error.CodecDecodeSelf(d) + } + case "Payload": + if r.TryDecodeAsNil() { + x.Payload = nil + } else { + yyv5 := &x.Payload + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *yyv5 = r.DecodeBytes(*(*[]byte)(yyv5), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *StreamErrWrapper) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.Error != nil { + x.Error = nil + } + } else { + if x.Error == nil { + x.Error = new(RpcError) + } + x.Error.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Payload = nil + } else { + yyv9 := &x.Payload + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *yyv9 = r.DecodeBytes(*(*[]byte)(yyv9), false, false) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *AllocStatsRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 10 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllocID")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AllocID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Task)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Task")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Task)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MinQueryIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.MinQueryIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MaxQueryTime")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxQueryTime) { + } else { + r.EncodeInt(int64(x.MaxQueryTime)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AllowStale")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.AllowStale)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Prefix")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *AllocStatsRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *AllocStatsRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "AllocID": + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv4 := &x.AllocID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Task": + if r.TryDecodeAsNil() { + x.Task = "" + } else { + yyv6 := &x.Task + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv8 := &x.Region + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "MinQueryIndex": + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv12 := &x.MinQueryIndex + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "MaxQueryTime": + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv14 := &x.MaxQueryTime + yym15 := z.DecBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.DecExt(yyv14) { + } else { + *((*int64)(yyv14)) = int64(r.DecodeInt(64)) + } + } + case "AllowStale": + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv16 := &x.AllowStale + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(yyv16)) = r.DecodeBool() + } + } + case "Prefix": + if r.TryDecodeAsNil() { + 
x.Prefix = "" + } else { + yyv18 := &x.Prefix + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv20 := &x.AuthToken + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv22 := &x.Forwarded + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(yyv22)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *AllocStatsRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllocID = "" + } else { + yyv25 := &x.AllocID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Task = "" + } else { + yyv27 := &x.Task + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv29 := &x.Region + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv31 := &x.Namespace + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MinQueryIndex = 0 + } else { + yyv33 := &x.MinQueryIndex + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*uint64)(yyv33)) = uint64(r.DecodeUint(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MaxQueryTime = 0 + } else { + yyv35 := &x.MaxQueryTime + yym36 := z.DecBinary() + _ = yym36 + if false { + } else if z.HasExtensions() && z.DecExt(yyv35) { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if 
yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AllowStale = false + } else { + yyv37 := &x.AllowStale + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*bool)(yyv37)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv39 := &x.Prefix + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv41 := &x.AuthToken + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv43 := &x.Forwarded + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*bool)(yyv43)) = r.DecodeBool() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *AllocStatsResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Stats == nil { + r.EncodeNil() + } else { + x.Stats.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Stats")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Stats == nil { + r.EncodeNil() + } else { + x.Stats.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Index")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("LastContact")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("KnownLeader")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *AllocStatsResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *AllocStatsResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "Stats": + if r.TryDecodeAsNil() { + if x.Stats != nil { + x.Stats = nil + } + } else { + if x.Stats == nil { + x.Stats = new(AllocResourceUsage) + } + x.Stats.CodecDecodeSelf(d) + } + case "Index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv5 := &x.Index + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*uint64)(yyv5)) = uint64(r.DecodeUint(64)) + } + } + case "LastContact": + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv7 := &x.LastContact + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + case "KnownLeader": + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv9 := &x.KnownLeader + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end 
for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *AllocStatsResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.Stats != nil { + x.Stats = nil + } + } else { + if x.Stats == nil { + x.Stats = new(AllocResourceUsage) + } + x.Stats.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv13 := &x.Index + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv15 := &x.LastContact + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv17 := &x.KnownLeader + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *MemoryStats) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 8 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeUint(uint64(x.RSS)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("RSS")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeUint(uint64(x.RSS)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + 
r.EncodeUint(uint64(x.Cache)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Cache")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.Cache)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeUint(uint64(x.Swap)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Swap")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeUint(uint64(x.Swap)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.Usage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Usage")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.Usage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.MaxUsage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MaxUsage")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.MaxUsage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeUint(uint64(x.KernelUsage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("KernelUsage")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeUint(uint64(x.KernelUsage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeUint(uint64(x.KernelMaxUsage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("KernelMaxUsage")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeUint(uint64(x.KernelMaxUsage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Measured == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + z.F.EncSliceStringV(x.Measured, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Measured")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Measured == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + z.F.EncSliceStringV(x.Measured, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *MemoryStats) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *MemoryStats) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "RSS": + if r.TryDecodeAsNil() { + x.RSS = 0 + } else { + yyv4 := &x.RSS + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*uint64)(yyv4)) = uint64(r.DecodeUint(64)) + } + } + case "Cache": + if r.TryDecodeAsNil() { + x.Cache = 0 + } else { + yyv6 := &x.Cache + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "Swap": + if r.TryDecodeAsNil() { + x.Swap = 0 + } else { + yyv8 := &x.Swap + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*uint64)(yyv8)) = uint64(r.DecodeUint(64)) + } + } + case "Usage": + if r.TryDecodeAsNil() { + x.Usage = 0 + } else { + yyv10 := &x.Usage + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + case "MaxUsage": + if r.TryDecodeAsNil() { + x.MaxUsage = 0 + } else { + yyv12 := &x.MaxUsage + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "KernelUsage": + if r.TryDecodeAsNil() { + x.KernelUsage = 0 + } else { + yyv14 := &x.KernelUsage + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*uint64)(yyv14)) = uint64(r.DecodeUint(64)) + } + } + case "KernelMaxUsage": + if r.TryDecodeAsNil() { + x.KernelMaxUsage = 0 + } else { + yyv16 := &x.KernelMaxUsage + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*uint64)(yyv16)) = uint64(r.DecodeUint(64)) + } + } + case "Measured": + if r.TryDecodeAsNil() { + x.Measured = nil + } else { + yyv18 := &x.Measured + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + z.F.DecSliceStringX(yyv18, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *MemoryStats) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.RSS = 0 + } else { + yyv21 := &x.RSS + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*uint64)(yyv21)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Cache = 0 + } else { + yyv23 := &x.Cache + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*uint64)(yyv23)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Swap = 0 + } else { + yyv25 := &x.Swap + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*uint64)(yyv25)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Usage = 0 + } else { + yyv27 := &x.Usage + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*uint64)(yyv27)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.MaxUsage = 0 + } else { + yyv29 := &x.MaxUsage + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*uint64)(yyv29)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.KernelUsage = 0 + } else { + yyv31 := &x.KernelUsage + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*uint64)(yyv31)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.KernelMaxUsage = 0 + } else { + yyv33 := &x.KernelMaxUsage + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*uint64)(yyv33)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Measured = nil + } else { + yyv35 := &x.Measured + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + z.F.DecSliceStringX(yyv35, false, d) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *CpuStats) CodecEncodeSelf(e 
*codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 7 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeFloat64(float64(x.SystemMode)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("SystemMode")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeFloat64(float64(x.SystemMode)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeFloat64(float64(x.UserMode)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("UserMode")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeFloat64(float64(x.UserMode)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeFloat64(float64(x.TotalTicks)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("TotalTicks")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeFloat64(float64(x.TotalTicks)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.ThrottledPeriods)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("ThrottledPeriods")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.ThrottledPeriods)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.ThrottledTime)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("ThrottledTime")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.ThrottledTime)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeFloat64(float64(x.Percent)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Percent")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeFloat64(float64(x.Percent)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Measured == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + z.F.EncSliceStringV(x.Measured, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Measured")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Measured == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + z.F.EncSliceStringV(x.Measured, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *CpuStats) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *CpuStats) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "SystemMode": + if r.TryDecodeAsNil() { + x.SystemMode = 0 + } else { + yyv4 := &x.SystemMode + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*float64)(yyv4)) = float64(r.DecodeFloat(false)) + } + } + case "UserMode": + if r.TryDecodeAsNil() { + x.UserMode = 0 + } else { + yyv6 := &x.UserMode + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*float64)(yyv6)) = float64(r.DecodeFloat(false)) + } + } + case "TotalTicks": + if r.TryDecodeAsNil() { + x.TotalTicks = 0 + } else { + yyv8 := &x.TotalTicks + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*float64)(yyv8)) = float64(r.DecodeFloat(false)) + } + } + case "ThrottledPeriods": + if r.TryDecodeAsNil() { + x.ThrottledPeriods = 0 + } else { + yyv10 := &x.ThrottledPeriods + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + case "ThrottledTime": + if r.TryDecodeAsNil() { + x.ThrottledTime = 0 + } else { + yyv12 := &x.ThrottledTime + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "Percent": + if r.TryDecodeAsNil() { + x.Percent = 0 + } else { + yyv14 := &x.Percent + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*float64)(yyv14)) = float64(r.DecodeFloat(false)) + } + } + case "Measured": + if r.TryDecodeAsNil() { + x.Measured = nil + } else { + yyv16 := &x.Measured + 
yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *CpuStats) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.SystemMode = 0 + } else { + yyv19 := &x.SystemMode + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*float64)(yyv19)) = float64(r.DecodeFloat(false)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.UserMode = 0 + } else { + yyv21 := &x.UserMode + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*float64)(yyv21)) = float64(r.DecodeFloat(false)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.TotalTicks = 0 + } else { + yyv23 := &x.TotalTicks + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*float64)(yyv23)) = float64(r.DecodeFloat(false)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.ThrottledPeriods = 0 + } else { + yyv25 := &x.ThrottledPeriods + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*uint64)(yyv25)) = uint64(r.DecodeUint(64)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.ThrottledTime = 0 + } else { + yyv27 := &x.ThrottledTime + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*uint64)(yyv27)) = uint64(r.DecodeUint(64)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Percent = 0 + } else { + yyv29 := &x.Percent + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*float64)(yyv29)) = float64(r.DecodeFloat(false)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Measured = nil + } else { + yyv31 := &x.Measured + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + z.F.DecSliceStringX(yyv31, false, d) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = 
yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *ResourceUsage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.MemoryStats == nil { + r.EncodeNil() + } else { + x.MemoryStats.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("MemoryStats")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.MemoryStats == nil { + r.EncodeNil() + } else { + x.MemoryStats.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.CpuStats == nil { + r.EncodeNil() + } else { + x.CpuStats.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("CpuStats")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.CpuStats == nil { + r.EncodeNil() + } else { + x.CpuStats.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.DeviceStats == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePtrtodevice_DeviceGroupStats(([]*pkg2_device.DeviceGroupStats)(x.DeviceStats), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("DeviceStats")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.DeviceStats == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSlicePtrtodevice_DeviceGroupStats(([]*pkg2_device.DeviceGroupStats)(x.DeviceStats), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *ResourceUsage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *ResourceUsage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, 
r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "MemoryStats": + if r.TryDecodeAsNil() { + if x.MemoryStats != nil { + x.MemoryStats = nil + } + } else { + if x.MemoryStats == nil { + x.MemoryStats = new(MemoryStats) + } + x.MemoryStats.CodecDecodeSelf(d) + } + case "CpuStats": + if r.TryDecodeAsNil() { + if x.CpuStats != nil { + x.CpuStats = nil + } + } else { + if x.CpuStats == nil { + x.CpuStats = new(CpuStats) + } + x.CpuStats.CodecDecodeSelf(d) + } + case "DeviceStats": + if r.TryDecodeAsNil() { + x.DeviceStats = nil + } else { + yyv6 := &x.DeviceStats + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicePtrtodevice_DeviceGroupStats((*[]*pkg2_device.DeviceGroupStats)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *ResourceUsage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.MemoryStats != nil { + x.MemoryStats = nil + } + } else { + if x.MemoryStats == nil { + x.MemoryStats = new(MemoryStats) + } + x.MemoryStats.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.CpuStats != nil { + x.CpuStats = nil + } + } else { + if x.CpuStats == nil { + x.CpuStats = new(CpuStats) + } + x.CpuStats.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.DeviceStats = nil + } else { + yyv11 := &x.DeviceStats + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSlicePtrtodevice_DeviceGroupStats((*[]*pkg2_device.DeviceGroupStats)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *TaskResourceUsage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 
bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.ResourceUsage == nil { + r.EncodeNil() + } else { + x.ResourceUsage.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("ResourceUsage")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.ResourceUsage == nil { + r.EncodeNil() + } else { + x.ResourceUsage.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Timestamp)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Timestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Timestamp)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Pids == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encMapstringPtrtoResourceUsage((map[string]*ResourceUsage)(x.Pids), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Pids")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Pids == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encMapstringPtrtoResourceUsage((map[string]*ResourceUsage)(x.Pids), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *TaskResourceUsage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *TaskResourceUsage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "ResourceUsage": + if r.TryDecodeAsNil() { + if x.ResourceUsage != nil { + x.ResourceUsage = nil + } + } else { + if x.ResourceUsage == nil { + x.ResourceUsage = new(ResourceUsage) + } + 
x.ResourceUsage.CodecDecodeSelf(d) + } + case "Timestamp": + if r.TryDecodeAsNil() { + x.Timestamp = 0 + } else { + yyv5 := &x.Timestamp + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int64)(yyv5)) = int64(r.DecodeInt(64)) + } + } + case "Pids": + if r.TryDecodeAsNil() { + x.Pids = nil + } else { + yyv7 := &x.Pids + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decMapstringPtrtoResourceUsage((*map[string]*ResourceUsage)(yyv7), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *TaskResourceUsage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.ResourceUsage != nil { + x.ResourceUsage = nil + } + } else { + if x.ResourceUsage == nil { + x.ResourceUsage = new(ResourceUsage) + } + x.ResourceUsage.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Timestamp = 0 + } else { + yyv11 := &x.Timestamp + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Pids = nil + } else { + yyv13 := &x.Pids + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decMapstringPtrtoResourceUsage((*map[string]*ResourceUsage)(yyv13), d) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *AllocResourceUsage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.ResourceUsage == nil { + r.EncodeNil() + } else { + x.ResourceUsage.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("ResourceUsage")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.ResourceUsage == nil { + r.EncodeNil() + } else { + x.ResourceUsage.CodecEncodeSelf(e) + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Tasks == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encMapstringPtrtoTaskResourceUsage((map[string]*TaskResourceUsage)(x.Tasks), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Tasks")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Tasks == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encMapstringPtrtoTaskResourceUsage((map[string]*TaskResourceUsage)(x.Tasks), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Timestamp)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Timestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Timestamp)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *AllocResourceUsage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *AllocResourceUsage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "ResourceUsage": + if r.TryDecodeAsNil() { + if x.ResourceUsage != nil { + x.ResourceUsage = nil + } + } else { + if x.ResourceUsage == nil { + x.ResourceUsage = new(ResourceUsage) + } + x.ResourceUsage.CodecDecodeSelf(d) + } + case "Tasks": + if r.TryDecodeAsNil() { + x.Tasks = nil + } else { + yyv5 := &x.Tasks + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decMapstringPtrtoTaskResourceUsage((*map[string]*TaskResourceUsage)(yyv5), d) + } + } + case "Timestamp": + if r.TryDecodeAsNil() { + x.Timestamp = 0 + } else { + yyv7 := &x.Timestamp + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *AllocResourceUsage) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + if x.ResourceUsage != nil { + x.ResourceUsage = nil + } + } else { + if x.ResourceUsage == nil { + x.ResourceUsage = new(ResourceUsage) + } + x.ResourceUsage.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Tasks = nil + } else { + yyv11 := &x.Tasks + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decMapstringPtrtoTaskResourceUsage((*map[string]*TaskResourceUsage)(yyv11), d) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Timestamp = 0 + } else { + yyv13 := &x.Timestamp + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int64)(yyv13)) = int64(r.DecodeInt(64)) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *HealthCheckRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [0]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(0) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *HealthCheckRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *HealthCheckRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *HealthCheckRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj4-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *HealthCheckResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if x.Drivers == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encMapstringPtrtostructs_DriverInfo((map[string]*pkg4_structs.DriverInfo)(x.Drivers), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Drivers")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + if x.Drivers == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encMapstringPtrtostructs_DriverInfo((map[string]*pkg4_structs.DriverInfo)(x.Drivers), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *HealthCheckResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *HealthCheckResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "Drivers": + if r.TryDecodeAsNil() { + x.Drivers = nil + } else { + yyv4 := &x.Drivers + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decMapstringPtrtostructs_DriverInfo((*map[string]*pkg4_structs.DriverInfo)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *HealthCheckResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Drivers = nil + } else { + yyv7 := &x.Drivers + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decMapstringPtrtostructs_DriverInfo((*map[string]*pkg4_structs.DriverInfo)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *HealthCheckIntervalRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [0]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(0) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *HealthCheckIntervalRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *HealthCheckIntervalRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, 
r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *HealthCheckIntervalRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj4-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x *HealthCheckIntervalResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Eligible)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Eligible")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Eligible)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.Period) { + } else { + r.EncodeInt(int64(x.Period)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey102) + r.EncodeString(codecSelferC_UTF8102, string("Period")) + z.EncSendContainerState(codecSelfer_containerMapValue102) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.Period) { + } else { + r.EncodeInt(int64(x.Period)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd102) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd102) + } + } + } +} + +func (x *HealthCheckIntervalResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap102 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd102) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray102 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd102) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr102) + } + } +} + +func (x *HealthCheckIntervalResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey102) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue102) + switch yys3 { + case "Eligible": + if r.TryDecodeAsNil() { + x.Eligible = false + } else { + yyv4 := &x.Eligible + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "Period": + if r.TryDecodeAsNil() { + x.Period = 0 + } else { + yyv6 := &x.Period + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x *HealthCheckIntervalResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Eligible = false + } else { + yyv9 := &x.Eligible + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd102) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + if r.TryDecodeAsNil() { + x.Period = 0 + } else { + yyv11 := &x.Period + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem102) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x codecSelfer102) encSlicePtrtoAllocFileInfo(v []*AllocFileInfo, e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x codecSelfer102) decSlicePtrtoAllocFileInfo(v *[]*AllocFileInfo, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = 
[]*AllocFileInfo{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*AllocFileInfo, yyrl1) + } + } else { + yyv1 = make([]*AllocFileInfo, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = AllocFileInfo{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(AllocFileInfo) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = AllocFileInfo{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(AllocFileInfo) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *AllocFileInfo + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = AllocFileInfo{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(AllocFileInfo) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*AllocFileInfo{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer102) encSlicePtrtodevice_DeviceGroupStats(v []*pkg2_device.DeviceGroupStats, e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem102) + if yyv1 == nil { + r.EncodeNil() + } else { + yym2 := z.EncBinary() + _ = yym2 + if false { + } else if z.HasExtensions() && z.EncExt(yyv1) { + } else { + z.EncFallback(yyv1) + } + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd102) +} + +func (x codecSelfer102) decSlicePtrtodevice_DeviceGroupStats(v *[]*pkg2_device.DeviceGroupStats, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*pkg2_device.DeviceGroupStats{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*pkg2_device.DeviceGroupStats, yyrl1) + } + } else { + yyv1 = make([]*pkg2_device.DeviceGroupStats, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + 
yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = pkg2_device.DeviceGroupStats{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(pkg2_device.DeviceGroupStats) + } + yyw2 := yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyw2) { + } else { + z.DecFallback(yyw2, false) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = pkg2_device.DeviceGroupStats{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(pkg2_device.DeviceGroupStats) + } + yyw4 := yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyw4) { + } else { + z.DecFallback(yyw4, false) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *pkg2_device.DeviceGroupStats + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = pkg2_device.DeviceGroupStats{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(pkg2_device.DeviceGroupStats) + } + yyw6 := yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyw6) { + } else { + z.DecFallback(yyw6, false) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*pkg2_device.DeviceGroupStats{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer102) encMapstringPtrtoResourceUsage(v map[string]*ResourceUsage, e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey102) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue102) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x codecSelfer102) decMapstringPtrtoResourceUsage(v *map[string]*ResourceUsage, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 24) + yyv1 = make(map[string]*ResourceUsage, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 *ResourceUsage + var yymg1, yyms1, yymok1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey102) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + yyms1 = true + if yymg1 { + yymv1, yymok1 = yyv1[yymk1] + if yymok1 { + yyms1 = false + } + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue102) + if r.TryDecodeAsNil() { + if yymv1 != nil { + *yymv1 = ResourceUsage{} + } + } else { + if yymv1 == nil { + yymv1 = new(ResourceUsage) + } + yymv1.CodecDecodeSelf(d) + } + + if yyms1 && 
yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey102) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + yyms1 = true + if yymg1 { + yymv1, yymok1 = yyv1[yymk1] + if yymok1 { + yyms1 = false + } + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue102) + if r.TryDecodeAsNil() { + if yymv1 != nil { + *yymv1 = ResourceUsage{} + } + } else { + if yymv1 == nil { + yymv1 = new(ResourceUsage) + } + yymv1.CodecDecodeSelf(d) + } + + if yyms1 && yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x codecSelfer102) encMapstringPtrtoTaskResourceUsage(v map[string]*TaskResourceUsage, e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey102) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue102) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x codecSelfer102) decMapstringPtrtoTaskResourceUsage(v *map[string]*TaskResourceUsage, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 24) + yyv1 = make(map[string]*TaskResourceUsage, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 *TaskResourceUsage + var yymg1, yyms1, yymok1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey102) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + yyms1 = true + if yymg1 { + yymv1, yymok1 = yyv1[yymk1] + if yymok1 { + yyms1 = false + } + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue102) + if r.TryDecodeAsNil() { + if yymv1 != nil { + *yymv1 = TaskResourceUsage{} + } + } else { + if yymv1 == nil { + yymv1 = new(TaskResourceUsage) + } + yymv1.CodecDecodeSelf(d) + } + + if yyms1 && yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey102) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + yyms1 = true + if yymg1 { + yymv1, yymok1 = yyv1[yymk1] + if yymok1 { + yyms1 = false + } + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue102) + if r.TryDecodeAsNil() { + if yymv1 != nil { + *yymv1 = TaskResourceUsage{} + } + } else { + if yymv1 == nil { + yymv1 = new(TaskResourceUsage) + } + yymv1.CodecDecodeSelf(d) + } + + if yyms1 && yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
+ z.DecSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x codecSelfer102) encMapstringPtrtostructs_DriverInfo(v map[string]*pkg4_structs.DriverInfo, e *codec1978.Encoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey102) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF8102, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue102) + if yyv1 == nil { + r.EncodeNil() + } else { + yym3 := z.EncBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.EncExt(yyv1) { + } else { + z.EncFallback(yyv1) + } + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd102) +} + +func (x codecSelfer102) decMapstringPtrtostructs_DriverInfo(v *map[string]*pkg4_structs.DriverInfo, d *codec1978.Decoder) { + var h codecSelfer102 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 24) + yyv1 = make(map[string]*pkg4_structs.DriverInfo, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 *pkg4_structs.DriverInfo + var yymg1, yyms1, yymok1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey102) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + yyms1 = true + if yymg1 { + yymv1, yymok1 = yyv1[yymk1] + if yymok1 { + yyms1 = false + } + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue102) + if r.TryDecodeAsNil() { + if yymv1 != nil { + *yymv1 = pkg4_structs.DriverInfo{} + } + } else { + if yymv1 == nil { + yymv1 = new(pkg4_structs.DriverInfo) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yymv1) { + } else { + z.DecFallback(yymv1, false) + } + } + + if yyms1 && yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey102) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv6 := &yymk1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + yyms1 = true + if yymg1 { + yymv1, yymok1 = yyv1[yymk1] + if yymok1 { + yyms1 = false + } + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue102) + if r.TryDecodeAsNil() { + if yymv1 != nil { + *yymv1 = pkg4_structs.DriverInfo{} + } + } else { + if yymv1 == nil { + yymv1 = new(pkg4_structs.DriverInfo) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yymv1) { + } else { + z.DecFallback(yymv1, false) + } + } + + if yyms1 && yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
+ z.DecSendContainerState(codecSelfer_containerMapEnd102) +} diff --git a/vendor/github.com/hashicorp/nomad/client/structs/structs.go b/vendor/github.com/hashicorp/nomad/client/structs/structs.go new file mode 100644 index 0000000..35ac598 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/structs/structs.go @@ -0,0 +1,307 @@ +package structs + +//go:generate codecgen -d 102 -o structs.generated.go structs.go + +import ( + "errors" + "time" + + "github.com/hashicorp/nomad/client/stats" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/device" +) + +// RpcError is used for serializing errors with a potential error code +type RpcError struct { + Message string + Code *int64 +} + +func NewRpcError(err error, code *int64) *RpcError { + return &RpcError{ + Message: err.Error(), + Code: code, + } +} + +func (r *RpcError) Error() string { + return r.Message +} + +// ClientStatsResponse is used to return statistics about a node. +type ClientStatsResponse struct { + HostStats *stats.HostStats + structs.QueryMeta +} + +// AllocFileInfo holds information about a file inside the AllocDir +type AllocFileInfo struct { + Name string + IsDir bool + Size int64 + FileMode string + ModTime time.Time +} + +// FsListRequest is used to list an allocation's directory. +type FsListRequest struct { + // AllocID is the allocation to list from + AllocID string + + // Path is the path to list + Path string + + structs.QueryOptions +} + +// FsListResponse is used to return the listings of an allocation's directory. +type FsListResponse struct { + // Files are the result of listing a directory. + Files []*AllocFileInfo + + structs.QueryMeta +} + +// FsStatRequest is used to stat a file +type FsStatRequest struct { + // AllocID is the allocation to stat the file in + AllocID string + + // Path is the path to list + Path string + + structs.QueryOptions +} + +// FsStatResponse is used to return the stat results of a file +type FsStatResponse struct { + // Info is the result of stating a file + Info *AllocFileInfo + + structs.QueryMeta +} + +// FsStreamRequest is the initial request for streaming the content of a file. +type FsStreamRequest struct { + // AllocID is the allocation to stream logs from + AllocID string + + // Path is the path to the file to stream + Path string + + // Offset is the offset to start streaming data at. + Offset int64 + + // Origin can either be "start" or "end" and determines where the offset is + // applied. + Origin string + + // PlainText disables base64 encoding. + PlainText bool + + // Limit is the number of bytes to read + Limit int64 + + // Follow follows the file. + Follow bool + + structs.QueryOptions +} + +// FsLogsRequest is the initial request for accessing allocation logs. +type FsLogsRequest struct { + // AllocID is the allocation to stream logs from + AllocID string + + // Task is the task to stream logs from + Task string + + // LogType indicates whether "stderr" or "stdout" should be streamed + LogType string + + // Offset is the offset to start streaming data at. + Offset int64 + + // Origin can either be "start" or "end" and determines where the offset is + // applied. + Origin string + + // PlainText disables base64 encoding. + PlainText bool + + // Follow follows logs. + Follow bool + + structs.QueryOptions +} + +// StreamErrWrapper is used to serialize output of a stream of a file or logs. +type StreamErrWrapper struct { + // Error stores any error that may have occurred. 
+ Error *RpcError + + // Payload is the payload + Payload []byte +} + +// AllocStatsRequest is used to request the resource usage of a given +// allocation, potentially filtering by task +type AllocStatsRequest struct { + // AllocID is the allocation to retrieves stats for + AllocID string + + // Task is an optional filter to only request stats for the task. + Task string + + structs.QueryOptions +} + +// AllocStatsResponse is used to return the resource usage of a given +// allocation. +type AllocStatsResponse struct { + Stats *AllocResourceUsage + structs.QueryMeta +} + +// MemoryStats holds memory usage related stats +type MemoryStats struct { + RSS uint64 + Cache uint64 + Swap uint64 + Usage uint64 + MaxUsage uint64 + KernelUsage uint64 + KernelMaxUsage uint64 + + // A list of fields whose values were actually sampled + Measured []string +} + +func (ms *MemoryStats) Add(other *MemoryStats) { + if other == nil { + return + } + + ms.RSS += other.RSS + ms.Cache += other.Cache + ms.Swap += other.Swap + ms.Usage += other.Usage + ms.MaxUsage += other.MaxUsage + ms.KernelUsage += other.KernelUsage + ms.KernelMaxUsage += other.KernelMaxUsage + ms.Measured = joinStringSet(ms.Measured, other.Measured) +} + +// CpuStats holds cpu usage related stats +type CpuStats struct { + SystemMode float64 + UserMode float64 + TotalTicks float64 + ThrottledPeriods uint64 + ThrottledTime uint64 + Percent float64 + + // A list of fields whose values were actually sampled + Measured []string +} + +func (cs *CpuStats) Add(other *CpuStats) { + if other == nil { + return + } + + cs.SystemMode += other.SystemMode + cs.UserMode += other.UserMode + cs.TotalTicks += other.TotalTicks + cs.ThrottledPeriods += other.ThrottledPeriods + cs.ThrottledTime += other.ThrottledTime + cs.Percent += other.Percent + cs.Measured = joinStringSet(cs.Measured, other.Measured) +} + +// ResourceUsage holds information related to cpu and memory stats +type ResourceUsage struct { + MemoryStats *MemoryStats + CpuStats *CpuStats + DeviceStats []*device.DeviceGroupStats +} + +func (ru *ResourceUsage) Add(other *ResourceUsage) { + ru.MemoryStats.Add(other.MemoryStats) + ru.CpuStats.Add(other.CpuStats) + ru.DeviceStats = append(ru.DeviceStats, other.DeviceStats...) +} + +// TaskResourceUsage holds aggregated resource usage of all processes in a Task +// and the resource usage of the individual pids +type TaskResourceUsage struct { + ResourceUsage *ResourceUsage + Timestamp int64 // UnixNano + Pids map[string]*ResourceUsage +} + +// AllocResourceUsage holds the aggregated task resource usage of the +// allocation. 
+type AllocResourceUsage struct { + // ResourceUsage is the summation of the task resources + ResourceUsage *ResourceUsage + + // Tasks contains the resource usage of each task + Tasks map[string]*TaskResourceUsage + + // The max timestamp of all the Tasks + Timestamp int64 +} + +// joinStringSet takes two slices of strings and joins them +func joinStringSet(s1, s2 []string) []string { + lookup := make(map[string]struct{}, len(s1)) + j := make([]string, 0, len(s1)) + for _, s := range s1 { + j = append(j, s) + lookup[s] = struct{}{} + } + + for _, s := range s2 { + if _, ok := lookup[s]; !ok { + j = append(j, s) + } + } + + return j +} + +// HealthCheckRequest is the request type for a type that fulfils the Health +// Check interface +type HealthCheckRequest struct{} + +// HealthCheckResponse is the response type for a type that fulfills the Health +// Check interface +type HealthCheckResponse struct { + // Drivers is a map of driver names to current driver information + Drivers map[string]*structs.DriverInfo +} + +type HealthCheckIntervalRequest struct{} +type HealthCheckIntervalResponse struct { + Eligible bool + Period time.Duration +} + +// AddDriverInfo adds information about a driver to the fingerprint response. +// If the Drivers field has not yet been initialized, it does so here. +func (h *HealthCheckResponse) AddDriverInfo(name string, driverInfo *structs.DriverInfo) { + // initialize Drivers if it has not been already + if h.Drivers == nil { + h.Drivers = make(map[string]*structs.DriverInfo, 0) + } + + h.Drivers[name] = driverInfo +} + +// CheckBufSize is the size of the buffer that is used for job output +const CheckBufSize = 4 * 1024 + +// DriverStatsNotImplemented is the error to be returned if a driver doesn't +// implement stats. +var DriverStatsNotImplemented = errors.New("stats not implemented for driver") diff --git a/vendor/github.com/hashicorp/nomad/drivers/shared/eventer/eventer.go b/vendor/github.com/hashicorp/nomad/drivers/shared/eventer/eventer.go new file mode 100644 index 0000000..e98c896 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/drivers/shared/eventer/eventer.go @@ -0,0 +1,156 @@ +package eventer + +import ( + "context" + "sync" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/plugins/drivers" +) + +var ( + // DefaultSendEventTimeout is the timeout used when publishing events to consumers + DefaultSendEventTimeout = 2 * time.Second + + // ConsumerGCInterval is the interval at which garbage collection of consumers + // occures + ConsumerGCInterval = time.Minute +) + +// Eventer is a utility to control broadcast of TaskEvents to multiple consumers. +// It also implements the TaskEvents func in the DriverPlugin interface so that +// it can be embedded in a implementing driver struct. +type Eventer struct { + + // events is a channel were events to be broadcasted are sent + // This channel is never closed, because it's lifetime is tied to the + // life of the driver and closing creates some subtile race conditions + // between closing it and emitting events. + events chan *drivers.TaskEvent + + // consumers is a slice of eventConsumers to broadcast events to. 
+ // access is gaurded by consumersLock RWMutex + consumers []*eventConsumer + consumersLock sync.RWMutex + + // ctx to allow control of event loop shutdown + ctx context.Context + + logger hclog.Logger +} + +type eventConsumer struct { + timeout time.Duration + ctx context.Context + ch chan *drivers.TaskEvent + logger hclog.Logger +} + +// NewEventer returns an Eventer with a running event loop that can be stopped +// by closing the given stop channel +func NewEventer(ctx context.Context, logger hclog.Logger) *Eventer { + e := &Eventer{ + events: make(chan *drivers.TaskEvent), + ctx: ctx, + logger: logger, + } + go e.eventLoop() + return e +} + +// eventLoop is the main logic which pulls events from the channel and broadcasts +// them to all consumers +func (e *Eventer) eventLoop() { + for { + select { + case <-e.ctx.Done(): + e.logger.Trace("task event loop shutdown") + return + case event := <-e.events: + e.iterateConsumers(event) + case <-time.After(ConsumerGCInterval): + e.gcConsumers() + } + } +} + +// iterateConsumers will iterate through all consumers and broadcast the event, +// cleaning up any consumers that have closed their context +func (e *Eventer) iterateConsumers(event *drivers.TaskEvent) { + e.consumersLock.Lock() + filtered := e.consumers[:0] + for _, consumer := range e.consumers { + + // prioritize checking if context is cancelled prior + // to attempting to forwarding events + // golang select evaluations aren't predictable + if consumer.ctx.Err() != nil { + close(consumer.ch) + continue + } + + select { + case <-time.After(consumer.timeout): + filtered = append(filtered, consumer) + e.logger.Warn("timeout sending event", "task_id", event.TaskID, "message", event.Message) + case <-consumer.ctx.Done(): + // consumer context finished, filtering it out of loop + close(consumer.ch) + case consumer.ch <- event: + filtered = append(filtered, consumer) + } + } + e.consumers = filtered + e.consumersLock.Unlock() +} + +func (e *Eventer) gcConsumers() { + e.consumersLock.Lock() + filtered := e.consumers[:0] + for _, consumer := range e.consumers { + select { + case <-consumer.ctx.Done(): + // consumer context finished, filtering it out of loop + default: + filtered = append(filtered, consumer) + } + } + e.consumers = filtered + e.consumersLock.Unlock() +} + +func (e *Eventer) newConsumer(ctx context.Context) *eventConsumer { + e.consumersLock.Lock() + defer e.consumersLock.Unlock() + + consumer := &eventConsumer{ + ch: make(chan *drivers.TaskEvent), + ctx: ctx, + timeout: DefaultSendEventTimeout, + logger: e.logger, + } + e.consumers = append(e.consumers, consumer) + + return consumer +} + +// TaskEvents is an implementation of the DriverPlugin.TaskEvents function +func (e *Eventer) TaskEvents(ctx context.Context) (<-chan *drivers.TaskEvent, error) { + consumer := e.newConsumer(ctx) + return consumer.ch, nil +} + +// EmitEvent can be used to broadcast a new event +func (e *Eventer) EmitEvent(event *drivers.TaskEvent) error { + + select { + case <-e.ctx.Done(): + return e.ctx.Err() + case e.events <- event: + if e.logger.IsTrace() { + e.logger.Trace("emitting event", "event", event) + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/helper/args/args.go b/vendor/github.com/hashicorp/nomad/helper/args/args.go new file mode 100644 index 0000000..fd78c37 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/args/args.go @@ -0,0 +1,33 @@ +package args + +import "regexp" + +var ( + envRe = regexp.MustCompile(`\${[a-zA-Z0-9_\-\.]+}`) +) + +// ReplaceEnv takes 
an arg and replaces all occurrences of environment variables. +// If the variable is found in the passed map it is replaced, otherwise the +// original string is returned. +func ReplaceEnv(arg string, environments ...map[string]string) string { + return envRe.ReplaceAllStringFunc(arg, func(arg string) string { + stripped := arg[2 : len(arg)-1] + for _, env := range environments { + if value, ok := env[stripped]; ok { + return value + } + } + + return arg + }) +} + +// ReplaceEnvWithPlaceHolder replaces all occurrences of environment variables with the placeholder string. +func ReplaceEnvWithPlaceHolder(arg string, placeholder string) string { + return envRe.ReplaceAllString(arg, placeholder) +} + +// ContainsEnv takes an arg and returns true if if contains an environment variable reference +func ContainsEnv(arg string) bool { + return envRe.MatchString(arg) +} diff --git a/vendor/github.com/hashicorp/nomad/helper/flatmap/flatmap.go b/vendor/github.com/hashicorp/nomad/helper/flatmap/flatmap.go new file mode 100644 index 0000000..4f4ec0d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/flatmap/flatmap.go @@ -0,0 +1,129 @@ +package flatmap + +import ( + "fmt" + "reflect" +) + +// Flatten takes an object and returns a flat map of the object. The keys of the +// map is the path of the field names until a primitive field is reached and the +// value is a string representation of the terminal field. +func Flatten(obj interface{}, filter []string, primitiveOnly bool) map[string]string { + flat := make(map[string]string) + v := reflect.ValueOf(obj) + if !v.IsValid() { + return nil + } + + flatten("", v, primitiveOnly, false, flat) + for _, f := range filter { + if _, ok := flat[f]; ok { + delete(flat, f) + } + } + return flat +} + +// flatten recursively calls itself to create a flatmap representation of the +// passed value. The results are stored into the output map and the keys are +// the fields prepended with the passed prefix. +// XXX: A current restriction is that maps only support string keys. 
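+// As a rough illustration of the key scheme (struct fields are joined with
+// "." via getSubPrefix and map keys are wrapped in brackets via
+// getSubKeyPrefix), flattening a value like
+//
+//	struct {
+//		Name string            // "web"
+//		Meta map[string]string // {"owner": "ops"}
+//	}
+//
+// would yield approximately {"Name": "web", "Meta[owner]": "ops"}.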
+func flatten(prefix string, v reflect.Value, primitiveOnly, enteredStruct bool, output map[string]string) { + switch v.Kind() { + case reflect.Bool: + output[prefix] = fmt.Sprintf("%v", v.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + output[prefix] = fmt.Sprintf("%v", v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + output[prefix] = fmt.Sprintf("%v", v.Uint()) + case reflect.Float32, reflect.Float64: + output[prefix] = fmt.Sprintf("%v", v.Float()) + case reflect.Complex64, reflect.Complex128: + output[prefix] = fmt.Sprintf("%v", v.Complex()) + case reflect.String: + output[prefix] = fmt.Sprintf("%v", v.String()) + case reflect.Invalid: + output[prefix] = "nil" + case reflect.Ptr: + if primitiveOnly && enteredStruct { + return + } + + e := v.Elem() + if !e.IsValid() { + output[prefix] = "nil" + } + flatten(prefix, e, primitiveOnly, enteredStruct, output) + case reflect.Map: + for _, k := range v.MapKeys() { + if k.Kind() == reflect.Interface { + k = k.Elem() + } + + if k.Kind() != reflect.String { + panic(fmt.Sprintf("%q: map key is not string: %s", prefix, k)) + } + + flatten(getSubKeyPrefix(prefix, k.String()), v.MapIndex(k), primitiveOnly, enteredStruct, output) + } + case reflect.Struct: + if primitiveOnly && enteredStruct { + return + } + enteredStruct = true + + t := v.Type() + for i := 0; i < v.NumField(); i++ { + name := t.Field(i).Name + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + + flatten(getSubPrefix(prefix, name), val, primitiveOnly, enteredStruct, output) + } + case reflect.Interface: + if primitiveOnly { + return + } + + e := v.Elem() + if !e.IsValid() { + output[prefix] = "nil" + return + } + flatten(prefix, e, primitiveOnly, enteredStruct, output) + case reflect.Array, reflect.Slice: + if primitiveOnly { + return + } + + if v.Kind() == reflect.Slice && v.IsNil() { + output[prefix] = "nil" + return + } + for i := 0; i < v.Len(); i++ { + flatten(fmt.Sprintf("%s[%d]", prefix, i), v.Index(i), primitiveOnly, enteredStruct, output) + } + default: + panic(fmt.Sprintf("prefix %q; unsupported type %v", prefix, v.Kind())) + } +} + +// getSubPrefix takes the current prefix and the next subfield and returns an +// appropriate prefix. +func getSubPrefix(curPrefix, subField string) string { + if curPrefix != "" { + return fmt.Sprintf("%s.%s", curPrefix, subField) + } + return fmt.Sprintf("%s", subField) +} + +// getSubKeyPrefix takes the current prefix and the next subfield and returns an +// appropriate prefix for a map field. +func getSubKeyPrefix(curPrefix, subField string) string { + if curPrefix != "" { + return fmt.Sprintf("%s[%s]", curPrefix, subField) + } + return fmt.Sprintf("%s", subField) +} diff --git a/vendor/github.com/hashicorp/nomad/helper/funcs.go b/vendor/github.com/hashicorp/nomad/helper/funcs.go new file mode 100644 index 0000000..64d998a --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/funcs.go @@ -0,0 +1,364 @@ +package helper + +import ( + "crypto/sha512" + "fmt" + "regexp" + "time" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl/hcl/ast" +) + +// validUUID is used to check if a given string looks like a UUID +var validUUID = regexp.MustCompile(`(?i)^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$`) + +// validInterpVarKey matches valid dotted variable names for interpolation. 
The +// string must begin with one or more non-dot characters which may be followed +// by sequences containing a dot followed by a one or more non-dot characters. +var validInterpVarKey = regexp.MustCompile(`^[^.]+(\.[^.]+)*$`) + +// IsUUID returns true if the given string is a valid UUID. +func IsUUID(str string) bool { + const uuidLen = 36 + if len(str) != uuidLen { + return false + } + + return validUUID.MatchString(str) +} + +// IsValidInterpVariable returns true if a valid dotted variable names for +// interpolation. The string must begin with one or more non-dot characters +// which may be followed by sequences containing a dot followed by a one or more +// non-dot characters. +func IsValidInterpVariable(str string) bool { + return validInterpVarKey.MatchString(str) +} + +// HashUUID takes an input UUID and returns a hashed version of the UUID to +// ensure it is well distributed. +func HashUUID(input string) (output string, hashed bool) { + if !IsUUID(input) { + return "", false + } + + // Hash the input + buf := sha512.Sum512([]byte(input)) + output = fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) + + return output, true +} + +// boolToPtr returns the pointer to a boolean +func BoolToPtr(b bool) *bool { + return &b +} + +// IntToPtr returns the pointer to an int +func IntToPtr(i int) *int { + return &i +} + +// Int8ToPtr returns the pointer to an int8 +func Int8ToPtr(i int8) *int8 { + return &i +} + +// Int64ToPtr returns the pointer to an int +func Int64ToPtr(i int64) *int64 { + return &i +} + +// Uint64ToPtr returns the pointer to an uint64 +func Uint64ToPtr(u uint64) *uint64 { + return &u +} + +// UintToPtr returns the pointer to an uint +func UintToPtr(u uint) *uint { + return &u +} + +// StringToPtr returns the pointer to a string +func StringToPtr(str string) *string { + return &str +} + +// TimeToPtr returns the pointer to a time stamp +func TimeToPtr(t time.Duration) *time.Duration { + return &t +} + +// Float64ToPtr returns the pointer to an float64 +func Float64ToPtr(f float64) *float64 { + return &f +} + +func IntMin(a, b int) int { + if a < b { + return a + } + return b +} + +func IntMax(a, b int) int { + if a > b { + return a + } + return b +} + +func Uint64Max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +// MapStringStringSliceValueSet returns the set of values in a map[string][]string +func MapStringStringSliceValueSet(m map[string][]string) []string { + set := make(map[string]struct{}) + for _, slice := range m { + for _, v := range slice { + set[v] = struct{}{} + } + } + + flat := make([]string, 0, len(set)) + for k := range set { + flat = append(flat, k) + } + return flat +} + +func SliceStringToSet(s []string) map[string]struct{} { + m := make(map[string]struct{}, (len(s)+1)/2) + for _, k := range s { + m[k] = struct{}{} + } + return m +} + +// SliceStringIsSubset returns whether the smaller set of strings is a subset of +// the larger. If the smaller slice is not a subset, the offending elements are +// returned. 
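+// For example, SliceStringIsSubset([]string{"a", "b", "c"}, []string{"a", "d"})
+// returns (false, []string{"d"}).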
+func SliceStringIsSubset(larger, smaller []string) (bool, []string) { + largerSet := make(map[string]struct{}, len(larger)) + for _, l := range larger { + largerSet[l] = struct{}{} + } + + subset := true + var offending []string + for _, s := range smaller { + if _, ok := largerSet[s]; !ok { + subset = false + offending = append(offending, s) + } + } + + return subset, offending +} + +func SliceSetDisjoint(first, second []string) (bool, []string) { + contained := make(map[string]struct{}, len(first)) + for _, k := range first { + contained[k] = struct{}{} + } + + offending := make(map[string]struct{}) + for _, k := range second { + if _, ok := contained[k]; ok { + offending[k] = struct{}{} + } + } + + if len(offending) == 0 { + return true, nil + } + + flattened := make([]string, 0, len(offending)) + for k := range offending { + flattened = append(flattened, k) + } + return false, flattened +} + +// CompareMapStringString returns true if the maps are equivalent. A nil and +// empty map are considered not equal. +func CompareMapStringString(a, b map[string]string) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + + if len(a) != len(b) { + return false + } + + for k, v := range a { + v2, ok := b[k] + if !ok { + return false + } + if v != v2 { + return false + } + } + + // Already compared all known values in a so only test that keys from b + // exist in a + for k := range b { + if _, ok := a[k]; !ok { + return false + } + } + + return true +} + +// Helpers for copying generic structures. +func CopyMapStringString(m map[string]string) map[string]string { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]string, l) + for k, v := range m { + c[k] = v + } + return c +} + +func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]struct{}, l) + for k := range m { + c[k] = struct{}{} + } + return c +} + +func CopyMapStringInt(m map[string]int) map[string]int { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]int, l) + for k, v := range m { + c[k] = v + } + return c +} + +func CopyMapStringFloat64(m map[string]float64) map[string]float64 { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]float64, l) + for k, v := range m { + c[k] = v + } + return c +} + +// CopyMapStringSliceString copies a map of strings to string slices such as +// http.Header +func CopyMapStringSliceString(m map[string][]string) map[string][]string { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string][]string, l) + for k, v := range m { + c[k] = CopySliceString(v) + } + return c +} + +func CopySliceString(s []string) []string { + l := len(s) + if l == 0 { + return nil + } + + c := make([]string, l) + for i, v := range s { + c[i] = v + } + return c +} + +func CopySliceInt(s []int) []int { + l := len(s) + if l == 0 { + return nil + } + + c := make([]int, l) + for i, v := range s { + c[i] = v + } + return c +} + +// CleanEnvVar replaces all occurrences of illegal characters in an environment +// variable with the specified byte. +func CleanEnvVar(s string, r byte) string { + b := []byte(s) + for i, c := range b { + switch { + case c == '_': + case c == '.': + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case i > 0 && c >= '0' && c <= '9': + default: + // Replace! 
+ b[i] = r + } + } + return string(b) +} + +func CheckHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf( + "invalid key: %s", key)) + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/nomad/helper/pluginutils/grpcutils/utils.go b/vendor/github.com/hashicorp/nomad/helper/pluginutils/grpcutils/utils.go new file mode 100644 index 0000000..001cf4a --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/pluginutils/grpcutils/utils.go @@ -0,0 +1,105 @@ +package grpcutils + +import ( + "context" + "time" + + "github.com/hashicorp/nomad/plugins/base/structs" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// HandleReqCtxGrpcErr is used to handle a non io.EOF error in a GRPC request +// where a user supplied context is used. It handles detecting if the plugin has +// shutdown via the passeed pluginCtx. The parameters are: +// - err: the error returned from the streaming RPC +// - reqCtx: the user context passed to the request +// - pluginCtx: the plugins done ctx used to detect the plugin dying +// +// The return values are: +// - ErrPluginShutdown if the error is because the plugin shutdown +// - context.Canceled if the reqCtx is canceled +// - The original error +func HandleReqCtxGrpcErr(err error, reqCtx, pluginCtx context.Context) error { + if err == nil { + return nil + } + + // Determine if the error is because the plugin shutdown + if errStatus, ok := status.FromError(err); ok && + (errStatus.Code() == codes.Unavailable || errStatus.Code() == codes.Canceled) { + // Potentially wait a little before returning an error so we can detect + // the exit + select { + case <-pluginCtx.Done(): + err = structs.ErrPluginShutdown + case <-reqCtx.Done(): + err = reqCtx.Err() + + // There is no guarantee that the select will choose the + // doneCtx first so we have to double check + select { + case <-pluginCtx.Done(): + err = structs.ErrPluginShutdown + default: + } + case <-time.After(3 * time.Second): + // Its okay to wait a while since the connection isn't available and + // on local host it is likely shutting down. It is not expected for + // this to ever reach even close to 3 seconds. + } + + // It is an error we don't know how to handle, so return it + return err + } + + // Context was cancelled + if errStatus := status.FromContextError(reqCtx.Err()); errStatus.Code() == codes.Canceled { + return context.Canceled + } + + return err +} + +// HandleGrpcErr is used to handle errors made to a remote gRPC plugin. It +// handles detecting if the plugin has shutdown via the passeed pluginCtx. 
The +// parameters are: +// - err: the error returned from the streaming RPC +// - pluginCtx: the plugins done ctx used to detect the plugin dying +// +// The return values are: +// - ErrPluginShutdown if the error is because the plugin shutdown +// - The original error +func HandleGrpcErr(err error, pluginCtx context.Context) error { + if err == nil { + return nil + } + + if errStatus := status.FromContextError(pluginCtx.Err()); errStatus.Code() == codes.Canceled { + // See if the plugin shutdown + select { + case <-pluginCtx.Done(): + err = structs.ErrPluginShutdown + default: + } + } + + // Determine if the error is because the plugin shutdown + if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.Unavailable { + // Potentially wait a little before returning an error so we can detect + // the exit + select { + case <-pluginCtx.Done(): + err = structs.ErrPluginShutdown + case <-time.After(3 * time.Second): + // Its okay to wait a while since the connection isn't available and + // on local host it is likely shutting down. It is not expected for + // this to ever reach even close to 3 seconds. + } + + // It is an error we don't know how to handle, so return it + return err + } + + return err +} diff --git a/vendor/github.com/hashicorp/nomad/helper/stats/cpu.go b/vendor/github.com/hashicorp/nomad/helper/stats/cpu.go new file mode 100644 index 0000000..952a4dc --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/stats/cpu.go @@ -0,0 +1,81 @@ +package stats + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + multierror "github.com/hashicorp/go-multierror" + "github.com/shirou/gopsutil/cpu" +) + +const ( + // cpuInfoTimeout is the timeout used when gathering CPU info. This is used + // to override the default timeout in gopsutil which has a tendency to + // timeout on Windows. 
+ cpuInfoTimeout = 60 * time.Second +) + +var ( + cpuMhzPerCore float64 + cpuModelName string + cpuNumCores int + cpuTotalTicks float64 + + initErr error + onceLer sync.Once +) + +func Init() error { + onceLer.Do(func() { + var merrs *multierror.Error + var err error + if cpuNumCores, err = cpu.Counts(true); err != nil { + merrs = multierror.Append(merrs, fmt.Errorf("Unable to determine the number of CPU cores available: %v", err)) + } + + var cpuInfo []cpu.InfoStat + ctx, cancel := context.WithTimeout(context.Background(), cpuInfoTimeout) + defer cancel() + if cpuInfo, err = cpu.InfoWithContext(ctx); err != nil { + merrs = multierror.Append(merrs, fmt.Errorf("Unable to obtain CPU information: %v", err)) + } + + for _, cpu := range cpuInfo { + cpuModelName = cpu.ModelName + cpuMhzPerCore = cpu.Mhz + break + } + + // Floor all of the values such that small difference don't cause the + // node to fall into a unique computed node class + cpuMhzPerCore = math.Floor(cpuMhzPerCore) + cpuTotalTicks = math.Floor(float64(cpuNumCores) * cpuMhzPerCore) + + // Set any errors that occurred + initErr = merrs.ErrorOrNil() + }) + return initErr +} + +// CPUModelName returns the number of CPU cores available +func CPUNumCores() int { + return cpuNumCores +} + +// CPUMHzPerCore returns the MHz per CPU core +func CPUMHzPerCore() float64 { + return cpuMhzPerCore +} + +// CPUModelName returns the model name of the CPU +func CPUModelName() string { + return cpuModelName +} + +// TotalTicksAvailable calculates the total Mhz available across all cores +func TotalTicksAvailable() float64 { + return cpuTotalTicks +} diff --git a/vendor/github.com/hashicorp/nomad/helper/stats/runtime.go b/vendor/github.com/hashicorp/nomad/helper/stats/runtime.go new file mode 100644 index 0000000..6aa9ee6 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/stats/runtime.go @@ -0,0 +1,18 @@ +package stats + +import ( + "runtime" + "strconv" +) + +// RuntimeStats is used to return various runtime information +func RuntimeStats() map[string]string { + return map[string]string{ + "kernel.name": runtime.GOOS, + "arch": runtime.GOARCH, + "version": runtime.Version(), + "max_procs": strconv.FormatInt(int64(runtime.GOMAXPROCS(0)), 10), + "goroutines": strconv.FormatInt(int64(runtime.NumGoroutine()), 10), + "cpu_count": strconv.FormatInt(int64(runtime.NumCPU()), 10), + } +} diff --git a/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go b/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go new file mode 100644 index 0000000..145c817 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go @@ -0,0 +1,21 @@ +package uuid + +import ( + crand "crypto/rand" + "fmt" +) + +// Generate is used to generate a random UUID +func Generate() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} diff --git a/vendor/github.com/hashicorp/nomad/lib/kheap/score_heap.go b/vendor/github.com/hashicorp/nomad/lib/kheap/score_heap.go new file mode 100644 index 0000000..216ae70 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/lib/kheap/score_heap.go @@ -0,0 +1,76 @@ +package kheap + +import ( + "container/heap" +) + +// HeapItem is an interface type implemented by objects stored in the ScoreHeap +type HeapItem interface { + Data() interface{} // The data object + Score() float64 // Score to use as the sort criteria +} + +// A 
ScoreHeap implements heap.Interface and is a min heap +// that keeps the top K elements by Score. Push can be called +// with an arbitrary number of values but only the top K are stored +type ScoreHeap struct { + items []HeapItem + capacity int +} + +func NewScoreHeap(capacity uint32) *ScoreHeap { + return &ScoreHeap{capacity: int(capacity)} +} + +func (pq ScoreHeap) Len() int { return len(pq.items) } + +func (pq ScoreHeap) Less(i, j int) bool { + return pq.items[i].Score() < pq.items[j].Score() +} + +func (pq ScoreHeap) Swap(i, j int) { + pq.items[i], pq.items[j] = pq.items[j], pq.items[i] +} + +// Push implements heap.Interface and only stores +// the top K elements by Score +func (pq *ScoreHeap) Push(x interface{}) { + item := x.(HeapItem) + if len(pq.items) < pq.capacity { + pq.items = append(pq.items, item) + } else { + // Pop the lowest scoring element if this item's Score is + // greater than the min Score so far + minIndex := 0 + min := pq.items[minIndex] + if item.Score() > min.Score() { + // Replace min and heapify + pq.items[minIndex] = item + heap.Fix(pq, minIndex) + } + } +} + +// Push implements heap.Interface and returns the top K scoring +// elements in increasing order of Score. Callers must reverse the order +// of returned elements to get the top K scoring elements in descending order +func (pq *ScoreHeap) Pop() interface{} { + old := pq.items + n := len(old) + item := old[n-1] + pq.items = old[0 : n-1] + return item +} + +// GetItemsReverse returns the items in this min heap in reverse order +// sorted by score descending +func (pq *ScoreHeap) GetItemsReverse() []interface{} { + ret := make([]interface{}, pq.Len()) + i := pq.Len() - 1 + for pq.Len() > 0 { + item := heap.Pop(pq) + ret[i] = item + i-- + } + return ret +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/batch_future.go b/vendor/github.com/hashicorp/nomad/nomad/structs/batch_future.go new file mode 100644 index 0000000..c0ddc30 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/batch_future.go @@ -0,0 +1,43 @@ +package structs + +// BatchFuture is used to wait on a batch update to complete +type BatchFuture struct { + doneCh chan struct{} + err error + index uint64 +} + +// NewBatchFuture creates a new batch future +func NewBatchFuture() *BatchFuture { + return &BatchFuture{ + doneCh: make(chan struct{}), + } +} + +// Wait is used to block for the future to complete and returns the error +func (b *BatchFuture) Wait() error { + <-b.doneCh + return b.err +} + +// WaitCh is used to block for the future to complete +func (b *BatchFuture) WaitCh() <-chan struct{} { + return b.doneCh +} + +// Error is used to return the error of the batch, only after Wait() +func (b *BatchFuture) Error() error { + return b.err +} + +// Index is used to return the index of the batch, only after Wait() +func (b *BatchFuture) Index() uint64 { + return b.index +} + +// Respond is used to unblock the future +func (b *BatchFuture) Respond(index uint64, err error) { + b.index = index + b.err = err + close(b.doneCh) +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go new file mode 100644 index 0000000..63758a0 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go @@ -0,0 +1,78 @@ +package structs + +import "fmt" + +// Bitmap is a simple uncompressed bitmap +type Bitmap []byte + +// NewBitmap returns a bitmap with up to size indexes +func NewBitmap(size uint) (Bitmap, error) { + if size == 0 { + return 
nil, fmt.Errorf("bitmap must be positive size") + } + if size&7 != 0 { + return nil, fmt.Errorf("bitmap must be byte aligned") + } + b := make([]byte, size>>3) + return Bitmap(b), nil +} + +// Copy returns a copy of the Bitmap +func (b Bitmap) Copy() (Bitmap, error) { + if b == nil { + return nil, fmt.Errorf("can't copy nil Bitmap") + } + + raw := make([]byte, len(b)) + copy(raw, b) + return Bitmap(raw), nil +} + +// Size returns the size of the bitmap +func (b Bitmap) Size() uint { + return uint(len(b) << 3) +} + +// Set is used to set the given index of the bitmap +func (b Bitmap) Set(idx uint) { + bucket := idx >> 3 + mask := byte(1 << (idx & 7)) + b[bucket] |= mask +} + +// Unset is used to unset the given index of the bitmap +func (b Bitmap) Unset(idx uint) { + bucket := idx >> 3 + // Mask should be all ones minus the idx position + offset := 1 << (idx & 7) + mask := byte(offset ^ 0xff) + b[bucket] &= mask +} + +// Check is used to check the given index of the bitmap +func (b Bitmap) Check(idx uint) bool { + bucket := idx >> 3 + mask := byte(1 << (idx & 7)) + return (b[bucket] & mask) != 0 +} + +// Clear is used to efficiently clear the bitmap +func (b Bitmap) Clear() { + for i := range b { + b[i] = 0 + } +} + +// IndexesInRange returns the indexes in which the values are either set or unset based +// on the passed parameter in the passed range +func (b Bitmap) IndexesInRange(set bool, from, to uint) []int { + var indexes []int + for i := from; i <= to && i < b.Size(); i++ { + c := b.Check(i) + if c && set || !c && !set { + indexes = append(indexes, int(i)) + } + } + + return indexes +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/devices.go b/vendor/github.com/hashicorp/nomad/nomad/structs/devices.go new file mode 100644 index 0000000..52cc21c --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/devices.go @@ -0,0 +1,142 @@ +package structs + +// DeviceAccounter is used to account for device usage on a node. It can detect +// when a node is oversubscribed and can be used for deciding what devices are +// free +type DeviceAccounter struct { + // Devices maps a device group to its device accounter instance + Devices map[DeviceIdTuple]*DeviceAccounterInstance +} + +// DeviceAccounterInstance wraps a device and adds tracking to the instances of +// the device to determine if they are free or not. +type DeviceAccounterInstance struct { + // Device is the device being wrapped + Device *NodeDeviceResource + + // Instances is a mapping of the device IDs to their usage. + // Only a value of 0 indicates that the instance is unused. + Instances map[string]int +} + +// NewDeviceAccounter returns a new device accounter. The node is used to +// populate the set of available devices based on what healthy device instances +// exist on the node. 
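+// A rough usage sketch (node and allocs are placeholders): build the
+// accounter from the node, feed it the node's existing allocations, and
+// treat a reported collision as oversubscription:
+//
+//	d := NewDeviceAccounter(node)
+//	if d.AddAllocs(allocs) {
+//		// at least one device instance is claimed more than once
+//	}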
+func NewDeviceAccounter(n *Node) *DeviceAccounter { + numDevices := 0 + var devices []*NodeDeviceResource + + // COMPAT(0.11): Remove in 0.11 + if n.NodeResources != nil { + numDevices = len(n.NodeResources.Devices) + devices = n.NodeResources.Devices + } + + d := &DeviceAccounter{ + Devices: make(map[DeviceIdTuple]*DeviceAccounterInstance, numDevices), + } + + for _, dev := range devices { + id := *dev.ID() + d.Devices[id] = &DeviceAccounterInstance{ + Device: dev, + Instances: make(map[string]int, len(dev.Instances)), + } + for _, instance := range dev.Instances { + // Skip unhealthy devices as they aren't allocatable + if !instance.Healthy { + continue + } + + d.Devices[id].Instances[instance.ID] = 0 + } + } + + return d +} + +// AddAllocs takes a set of allocations and internally marks which devices are +// used. If a device is used more than once by the set of passed allocations, +// the collision will be returned as true. +func (d *DeviceAccounter) AddAllocs(allocs []*Allocation) (collision bool) { + for _, a := range allocs { + // Filter any terminal allocation + if a.TerminalStatus() { + continue + } + + // COMPAT(0.11): Remove in 0.11 + // If the alloc doesn't have the new style resources, it can't have + // devices + if a.AllocatedResources == nil { + continue + } + + // Go through each task resource + for _, tr := range a.AllocatedResources.Tasks { + + // Go through each assigned device group + for _, device := range tr.Devices { + devID := device.ID() + + // Go through each assigned device + for _, instanceID := range device.DeviceIDs { + + // Mark that we are using the device. It may not be in the + // map if the device is no longer being fingerprinted, is + // unhealthy, etc. + if devInst, ok := d.Devices[*devID]; ok { + if i, ok := devInst.Instances[instanceID]; ok { + // Mark that the device is in use + devInst.Instances[instanceID]++ + + if i != 0 { + collision = true + } + } + } + } + } + } + } + + return +} + +// AddReserved marks the device instances in the passed device reservation as +// used and returns if there is a collision. +func (d *DeviceAccounter) AddReserved(res *AllocatedDeviceResource) (collision bool) { + // Lookup the device. + devInst, ok := d.Devices[*res.ID()] + if !ok { + return false + } + + // For each reserved instance, mark it as used + for _, id := range res.DeviceIDs { + cur, ok := devInst.Instances[id] + if !ok { + continue + } + + // It has already been used, so mark that there is a collision + if cur != 0 { + collision = true + } + + devInst.Instances[id]++ + } + + return +} + +// FreeCount returns the number of free device instances +func (i *DeviceAccounterInstance) FreeCount() int { + count := 0 + for _, c := range i.Instances { + if c == 0 { + count++ + } + } + return count +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go new file mode 100644 index 0000000..a5bf9a1 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go @@ -0,0 +1,1381 @@ +package structs + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/nomad/helper/flatmap" + "github.com/mitchellh/hashstructure" +) + +// DiffType denotes the type of a diff object. 
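// --- Editorial aside: illustrative sketch, not part of the vendored file. ---
// DeviceAccounter above keeps one usage counter per device instance ID: a
// counter of 0 means the instance is free, and bumping a counter that is
// already non-zero is reported as a collision (the instance was handed out
// twice). The same bookkeeping in miniature, with plain maps instead of the
// Nomad structs (names here are hypothetical):
func exampleDeviceAccounting() (collision bool, free int) {
	instances := map[string]int{"gpu-0": 0, "gpu-1": 0}
	for _, id := range []string{"gpu-0", "gpu-0"} { // the second use of gpu-0 collides
		if prev, ok := instances[id]; ok {
			if prev != 0 {
				collision = true
			}
			instances[id]++
		}
	}
	for _, count := range instances {
		if count == 0 {
			free++ // mirrors FreeCount above
		}
	}
	return collision, free // -> true, 1
}
// --- End editorial aside. ---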
+type DiffType string + +var ( + DiffTypeNone DiffType = "None" + DiffTypeAdded DiffType = "Added" + DiffTypeDeleted DiffType = "Deleted" + DiffTypeEdited DiffType = "Edited" +) + +func (d DiffType) Less(other DiffType) bool { + // Edited > Added > Deleted > None + // But we do a reverse sort + if d == other { + return false + } + + if d == DiffTypeEdited { + return true + } else if other == DiffTypeEdited { + return false + } else if d == DiffTypeAdded { + return true + } else if other == DiffTypeAdded { + return false + } else if d == DiffTypeDeleted { + return true + } else if other == DiffTypeDeleted { + return false + } + + return true +} + +// JobDiff contains the diff of two jobs. +type JobDiff struct { + Type DiffType + ID string + Fields []*FieldDiff + Objects []*ObjectDiff + TaskGroups []*TaskGroupDiff +} + +// Diff returns a diff of two jobs and a potential error if the Jobs are not +// diffable. If contextual diff is enabled, objects within the job will contain +// field information even if unchanged. +func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) { + // COMPAT: Remove "Update" in 0.7.0. Update pushed down to task groups + // in 0.6.0 + diff := &JobDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"ID", "Status", "StatusDescription", "Version", "Stable", "CreateIndex", + "ModifyIndex", "JobModifyIndex", "Update", "SubmitTime"} + + if j == nil && other == nil { + return diff, nil + } else if j == nil { + j = &Job{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + diff.ID = other.ID + } else if other == nil { + other = &Job{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(j, filter, true) + diff.ID = j.ID + } else { + if j.ID != other.ID { + return nil, fmt.Errorf("can not diff jobs with different IDs: %q and %q", j.ID, other.ID) + } + + oldPrimitiveFlat = flatmap.Flatten(j, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + diff.ID = other.ID + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Datacenters diff + if setDiff := stringSetDiff(j.Datacenters, other.Datacenters, "Datacenters", contextual); setDiff != nil && setDiff.Type != DiffTypeNone { + diff.Objects = append(diff.Objects, setDiff) + } + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(j.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) + } + + // Affinities diff + affinitiesDiff := primitiveObjectSetDiff( + interfaceSlice(j.Affinities), + interfaceSlice(other.Affinities), + []string{"str"}, + "Affinity", + contextual) + if affinitiesDiff != nil { + diff.Objects = append(diff.Objects, affinitiesDiff...) + } + + // Task groups diff + tgs, err := taskGroupDiffs(j.TaskGroups, other.TaskGroups, contextual) + if err != nil { + return nil, err + } + diff.TaskGroups = tgs + + // Periodic diff + if pDiff := primitiveObjectDiff(j.Periodic, other.Periodic, nil, "Periodic", contextual); pDiff != nil { + diff.Objects = append(diff.Objects, pDiff) + } + + // ParameterizedJob diff + if cDiff := parameterizedJobDiff(j.ParameterizedJob, other.ParameterizedJob, contextual); cDiff != nil { + diff.Objects = append(diff.Objects, cDiff) + } + + // Check to see if there is a diff. 
We don't use reflect because we are + // filtering quite a few fields that will change on each diff. + if diff.Type == DiffTypeNone { + for _, fd := range diff.Fields { + if fd.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + if diff.Type == DiffTypeNone { + for _, od := range diff.Objects { + if od.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + if diff.Type == DiffTypeNone { + for _, tg := range diff.TaskGroups { + if tg.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + return diff, nil +} + +func (j *JobDiff) GoString() string { + out := fmt.Sprintf("Job %q (%s):\n", j.ID, j.Type) + + for _, f := range j.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range j.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + for _, tg := range j.TaskGroups { + out += fmt.Sprintf("%#v\n", tg) + } + + return out +} + +// TaskGroupDiff contains the diff of two task groups. +type TaskGroupDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Tasks []*TaskDiff + Updates map[string]uint64 +} + +// Diff returns a diff of two task groups. If contextual diff is enabled, +// objects' fields will be stored even if no diff occurred as long as one field +// changed. +func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, error) { + diff := &TaskGroupDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Name"} + + if tg == nil && other == nil { + return diff, nil + } else if tg == nil { + tg = &TaskGroup{} + diff.Type = DiffTypeAdded + diff.Name = other.Name + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &TaskGroup{} + diff.Type = DiffTypeDeleted + diff.Name = tg.Name + oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) + } else { + if !reflect.DeepEqual(tg, other) { + diff.Type = DiffTypeEdited + } + if tg.Name != other.Name { + return nil, fmt.Errorf("can not diff task groups with different names: %q and %q", tg.Name, other.Name) + } + diff.Name = other.Name + oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(tg.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) + } + + // Affinities diff + affinitiesDiff := primitiveObjectSetDiff( + interfaceSlice(tg.Affinities), + interfaceSlice(other.Affinities), + []string{"str"}, + "Affinity", + contextual) + if affinitiesDiff != nil { + diff.Objects = append(diff.Objects, affinitiesDiff...) 
+ } + + // Restart policy diff + rDiff := primitiveObjectDiff(tg.RestartPolicy, other.RestartPolicy, nil, "RestartPolicy", contextual) + if rDiff != nil { + diff.Objects = append(diff.Objects, rDiff) + } + + // Reschedule policy diff + reschedDiff := primitiveObjectDiff(tg.ReschedulePolicy, other.ReschedulePolicy, nil, "ReschedulePolicy", contextual) + if reschedDiff != nil { + diff.Objects = append(diff.Objects, reschedDiff) + } + + // EphemeralDisk diff + diskDiff := primitiveObjectDiff(tg.EphemeralDisk, other.EphemeralDisk, nil, "EphemeralDisk", contextual) + if diskDiff != nil { + diff.Objects = append(diff.Objects, diskDiff) + } + + // Update diff + // COMPAT: Remove "Stagger" in 0.7.0. + if uDiff := primitiveObjectDiff(tg.Update, other.Update, []string{"Stagger"}, "Update", contextual); uDiff != nil { + diff.Objects = append(diff.Objects, uDiff) + } + + // Tasks diff + tasks, err := taskDiffs(tg.Tasks, other.Tasks, contextual) + if err != nil { + return nil, err + } + diff.Tasks = tasks + + return diff, nil +} + +func (tg *TaskGroupDiff) GoString() string { + out := fmt.Sprintf("Group %q (%s):\n", tg.Name, tg.Type) + + if len(tg.Updates) != 0 { + out += "Updates {\n" + for update, count := range tg.Updates { + out += fmt.Sprintf("%d %s\n", count, update) + } + out += "}\n" + } + + for _, f := range tg.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range tg.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + for _, t := range tg.Tasks { + out += fmt.Sprintf("%#v\n", t) + } + + return out +} + +// TaskGroupDiffs diffs two sets of task groups. If contextual diff is enabled, +// objects' fields will be stored even if no diff occurred as long as one field +// changed. +func taskGroupDiffs(old, new []*TaskGroup, contextual bool) ([]*TaskGroupDiff, error) { + oldMap := make(map[string]*TaskGroup, len(old)) + newMap := make(map[string]*TaskGroup, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*TaskGroupDiff + for name, oldGroup := range oldMap { + // Diff the same, deleted and edited + diff, err := oldGroup.Diff(newMap[name], contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + + for name, newGroup := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + diff, err := old.Diff(newGroup, contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + } + + sort.Sort(TaskGroupDiffs(diffs)) + return diffs, nil +} + +// For sorting TaskGroupDiffs +type TaskGroupDiffs []*TaskGroupDiff + +func (tg TaskGroupDiffs) Len() int { return len(tg) } +func (tg TaskGroupDiffs) Swap(i, j int) { tg[i], tg[j] = tg[j], tg[i] } +func (tg TaskGroupDiffs) Less(i, j int) bool { return tg[i].Name < tg[j].Name } + +// TaskDiff contains the diff of two Tasks +type TaskDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Annotations []string +} + +// Diff returns a diff of two tasks. If contextual diff is enabled, objects +// within the task will contain field information even if unchanged. 
+func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) { + diff := &TaskDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Name", "Config"} + + if t == nil && other == nil { + return diff, nil + } else if t == nil { + t = &Task{} + diff.Type = DiffTypeAdded + diff.Name = other.Name + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &Task{} + diff.Type = DiffTypeDeleted + diff.Name = t.Name + oldPrimitiveFlat = flatmap.Flatten(t, filter, true) + } else { + if !reflect.DeepEqual(t, other) { + diff.Type = DiffTypeEdited + } + if t.Name != other.Name { + return nil, fmt.Errorf("can not diff tasks with different names: %q and %q", t.Name, other.Name) + } + diff.Name = other.Name + oldPrimitiveFlat = flatmap.Flatten(t, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(t.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) + } + + // Affinities diff + affinitiesDiff := primitiveObjectSetDiff( + interfaceSlice(t.Affinities), + interfaceSlice(other.Affinities), + []string{"str"}, + "Affinity", + contextual) + if affinitiesDiff != nil { + diff.Objects = append(diff.Objects, affinitiesDiff...) + } + + // Config diff + if cDiff := configDiff(t.Config, other.Config, contextual); cDiff != nil { + diff.Objects = append(diff.Objects, cDiff) + } + + // Resources diff + if rDiff := t.Resources.Diff(other.Resources, contextual); rDiff != nil { + diff.Objects = append(diff.Objects, rDiff) + } + + // LogConfig diff + lDiff := primitiveObjectDiff(t.LogConfig, other.LogConfig, nil, "LogConfig", contextual) + if lDiff != nil { + diff.Objects = append(diff.Objects, lDiff) + } + + // Dispatch payload diff + dDiff := primitiveObjectDiff(t.DispatchPayload, other.DispatchPayload, nil, "DispatchPayload", contextual) + if dDiff != nil { + diff.Objects = append(diff.Objects, dDiff) + } + + // Artifacts diff + diffs := primitiveObjectSetDiff( + interfaceSlice(t.Artifacts), + interfaceSlice(other.Artifacts), + nil, + "Artifact", + contextual) + if diffs != nil { + diff.Objects = append(diff.Objects, diffs...) + } + + // Services diff + if sDiffs := serviceDiffs(t.Services, other.Services, contextual); sDiffs != nil { + diff.Objects = append(diff.Objects, sDiffs...) + } + + // Vault diff + vDiff := vaultDiff(t.Vault, other.Vault, contextual) + if vDiff != nil { + diff.Objects = append(diff.Objects, vDiff) + } + + // Template diff + tmplDiffs := primitiveObjectSetDiff( + interfaceSlice(t.Templates), + interfaceSlice(other.Templates), + nil, + "Template", + contextual) + if tmplDiffs != nil { + diff.Objects = append(diff.Objects, tmplDiffs...) + } + + return diff, nil +} + +func (t *TaskDiff) GoString() string { + var out string + if len(t.Annotations) == 0 { + out = fmt.Sprintf("Task %q (%s):\n", t.Name, t.Type) + } else { + out = fmt.Sprintf("Task %q (%s) (%s):\n", t.Name, t.Type, strings.Join(t.Annotations, ",")) + } + + for _, f := range t.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range t.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + return out +} + +// taskDiffs diffs a set of tasks. 
If contextual diff is enabled, unchanged +// fields within objects nested in the tasks will be returned. +func taskDiffs(old, new []*Task, contextual bool) ([]*TaskDiff, error) { + oldMap := make(map[string]*Task, len(old)) + newMap := make(map[string]*Task, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*TaskDiff + for name, oldGroup := range oldMap { + // Diff the same, deleted and edited + diff, err := oldGroup.Diff(newMap[name], contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + + for name, newGroup := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + diff, err := old.Diff(newGroup, contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + } + + sort.Sort(TaskDiffs(diffs)) + return diffs, nil +} + +// For sorting TaskDiffs +type TaskDiffs []*TaskDiff + +func (t TaskDiffs) Len() int { return len(t) } +func (t TaskDiffs) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t TaskDiffs) Less(i, j int) bool { return t[i].Name < t[j].Name } + +// serviceDiff returns the diff of two service objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. +func serviceDiff(old, new *Service, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Service"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &Service{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &Service{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + if setDiff := stringSetDiff(old.CanaryTags, new.CanaryTags, "CanaryTags", contextual); setDiff != nil { + diff.Objects = append(diff.Objects, setDiff) + } + + // Tag diffs + if setDiff := stringSetDiff(old.Tags, new.Tags, "Tags", contextual); setDiff != nil { + diff.Objects = append(diff.Objects, setDiff) + } + + // Checks diffs + if cDiffs := serviceCheckDiffs(old.Checks, new.Checks, contextual); cDiffs != nil { + diff.Objects = append(diff.Objects, cDiffs...) + } + + return diff +} + +// serviceDiffs diffs a set of services. If contextual diff is enabled, unchanged +// fields within objects nested in the tasks will be returned. +func serviceDiffs(old, new []*Service, contextual bool) []*ObjectDiff { + oldMap := make(map[string]*Service, len(old)) + newMap := make(map[string]*Service, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*ObjectDiff + for name, oldService := range oldMap { + // Diff the same, deleted and edited + if diff := serviceDiff(oldService, newMap[name], contextual); diff != nil { + diffs = append(diffs, diff) + } + } + + for name, newService := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + if diff := serviceDiff(old, newService, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs +} + +// serviceCheckDiff returns the diff of two service check objects. 
If contextual +// diff is enabled, all fields will be returned, even if no diff occurred. +func serviceCheckDiff(old, new *ServiceCheck, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Check"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &ServiceCheck{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &ServiceCheck{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Diff Header + if headerDiff := checkHeaderDiff(old.Header, new.Header, contextual); headerDiff != nil { + diff.Objects = append(diff.Objects, headerDiff) + } + + // Diff check_restart + if crDiff := checkRestartDiff(old.CheckRestart, new.CheckRestart, contextual); crDiff != nil { + diff.Objects = append(diff.Objects, crDiff) + } + + return diff +} + +// checkHeaderDiff returns the diff of two service check header objects. If +// contextual diff is enabled, all fields will be returned, even if no diff +// occurred. +func checkHeaderDiff(old, new map[string][]string, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Header"} + var oldFlat, newFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if len(old) == 0 { + diff.Type = DiffTypeAdded + newFlat = flatmap.Flatten(new, nil, false) + } else if len(new) == 0 { + diff.Type = DiffTypeDeleted + oldFlat = flatmap.Flatten(old, nil, false) + } else { + diff.Type = DiffTypeEdited + oldFlat = flatmap.Flatten(old, nil, false) + newFlat = flatmap.Flatten(new, nil, false) + } + + diff.Fields = fieldDiffs(oldFlat, newFlat, contextual) + return diff +} + +// checkRestartDiff returns the diff of two service check check_restart +// objects. If contextual diff is enabled, all fields will be returned, even if +// no diff occurred. +func checkRestartDiff(old, new *CheckRestart, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "CheckRestart"} + var oldFlat, newFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + diff.Type = DiffTypeAdded + newFlat = flatmap.Flatten(new, nil, true) + diff.Type = DiffTypeAdded + } else if new == nil { + diff.Type = DiffTypeDeleted + oldFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldFlat = flatmap.Flatten(old, nil, true) + newFlat = flatmap.Flatten(new, nil, true) + } + + diff.Fields = fieldDiffs(oldFlat, newFlat, contextual) + return diff +} + +// serviceCheckDiffs diffs a set of service checks. If contextual diff is +// enabled, unchanged fields within objects nested in the tasks will be +// returned. 
+func serviceCheckDiffs(old, new []*ServiceCheck, contextual bool) []*ObjectDiff { + oldMap := make(map[string]*ServiceCheck, len(old)) + newMap := make(map[string]*ServiceCheck, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*ObjectDiff + for name, oldCheck := range oldMap { + // Diff the same, deleted and edited + if diff := serviceCheckDiff(oldCheck, newMap[name], contextual); diff != nil { + diffs = append(diffs, diff) + } + } + + for name, newCheck := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + if diff := serviceCheckDiff(old, newCheck, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs +} + +// vaultDiff returns the diff of two vault objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. +func vaultDiff(old, new *Vault, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Vault"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &Vault{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &Vault{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Policies diffs + if setDiff := stringSetDiff(old.Policies, new.Policies, "Policies", contextual); setDiff != nil { + diff.Objects = append(diff.Objects, setDiff) + } + + return diff +} + +// parameterizedJobDiff returns the diff of two parameterized job objects. If +// contextual diff is enabled, all fields will be returned, even if no diff +// occurred. +func parameterizedJobDiff(old, new *ParameterizedJobConfig, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "ParameterizedJob"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &ParameterizedJobConfig{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &ParameterizedJobConfig{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Meta diffs + if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "MetaOptional", contextual); optionalDiff != nil { + diff.Objects = append(diff.Objects, optionalDiff) + } + + if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "MetaRequired", contextual); requiredDiff != nil { + diff.Objects = append(diff.Objects, requiredDiff) + } + + return diff +} + +// Diff returns a diff of two resource objects. If contextual diff is enabled, +// non-changed fields will still be returned. 
+func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Resources"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(r, other) { + return nil + } else if r == nil { + r = &Resources{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } else if other == nil { + other = &Resources{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Network Resources diff + if nDiffs := networkResourceDiffs(r.Networks, other.Networks, contextual); nDiffs != nil { + diff.Objects = append(diff.Objects, nDiffs...) + } + + // Requested Devices diff + if nDiffs := requestedDevicesDiffs(r.Devices, other.Devices, contextual); nDiffs != nil { + diff.Objects = append(diff.Objects, nDiffs...) + } + + return diff +} + +// Diff returns a diff of two network resources. If contextual diff is enabled, +// non-changed fields will still be returned. +func (r *NetworkResource) Diff(other *NetworkResource, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Network"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Device", "CIDR", "IP"} + + if reflect.DeepEqual(r, other) { + return nil + } else if r == nil { + r = &NetworkResource{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &NetworkResource{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(r, filter, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(r, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Port diffs + resPorts := portDiffs(r.ReservedPorts, other.ReservedPorts, false, contextual) + dynPorts := portDiffs(r.DynamicPorts, other.DynamicPorts, true, contextual) + if resPorts != nil { + diff.Objects = append(diff.Objects, resPorts...) + } + if dynPorts != nil { + diff.Objects = append(diff.Objects, dynPorts...) + } + + return diff +} + +// networkResourceDiffs diffs a set of NetworkResources. If contextual diff is enabled, +// non-changed fields will still be returned. +func networkResourceDiffs(old, new []*NetworkResource, contextual bool) []*ObjectDiff { + makeSet := func(objects []*NetworkResource) map[string]*NetworkResource { + objMap := make(map[string]*NetworkResource, len(objects)) + for _, obj := range objects { + hash, err := hashstructure.Hash(obj, nil) + if err != nil { + panic(err) + } + objMap[fmt.Sprintf("%d", hash)] = obj + } + + return objMap + } + + oldSet := makeSet(old) + newSet := makeSet(new) + + var diffs []*ObjectDiff + for k, oldV := range oldSet { + if newV, ok := newSet[k]; !ok { + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + for k, newV := range newSet { + if oldV, ok := oldSet[k]; !ok { + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs + +} + +// portDiffs returns the diff of two sets of ports. 
The dynamic flag marks the +// set of ports as being Dynamic ports versus Static ports. If contextual diff is enabled, +// non-changed fields will still be returned. +func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff { + makeSet := func(ports []Port) map[string]Port { + portMap := make(map[string]Port, len(ports)) + for _, port := range ports { + portMap[port.Label] = port + } + + return portMap + } + + oldPorts := makeSet(old) + newPorts := makeSet(new) + + var filter []string + name := "Static Port" + if dynamic { + filter = []string{"Value"} + name = "Dynamic Port" + } + + var diffs []*ObjectDiff + for portLabel, oldPort := range oldPorts { + // Diff the same, deleted and edited + if newPort, ok := newPorts[portLabel]; ok { + diff := primitiveObjectDiff(oldPort, newPort, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } else { + diff := primitiveObjectDiff(oldPort, nil, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } + } + for label, newPort := range newPorts { + // Diff the added + if _, ok := oldPorts[label]; !ok { + diff := primitiveObjectDiff(nil, newPort, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs + +} + +// Diff returns a diff of two requested devices. If contextual diff is enabled, +// non-changed fields will still be returned. +func (r *RequestedDevice) Diff(other *RequestedDevice, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Device"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(r, other) { + return nil + } else if r == nil { + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } else if other == nil { + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + return diff +} + +// requestedDevicesDiffs diffs a set of RequestedDevices. If contextual diff is enabled, +// non-changed fields will still be returned. +func requestedDevicesDiffs(old, new []*RequestedDevice, contextual bool) []*ObjectDiff { + makeSet := func(devices []*RequestedDevice) map[string]*RequestedDevice { + deviceMap := make(map[string]*RequestedDevice, len(devices)) + for _, d := range devices { + deviceMap[d.Name] = d + } + + return deviceMap + } + + oldSet := makeSet(old) + newSet := makeSet(new) + + var diffs []*ObjectDiff + for k, oldV := range oldSet { + newV := newSet[k] + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + for k, newV := range newSet { + if oldV, ok := oldSet[k]; !ok { + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs + +} + +// configDiff returns the diff of two Task Config objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. 
+func configDiff(old, new map[string]interface{}, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Config"} + if reflect.DeepEqual(old, new) { + return nil + } else if len(old) == 0 { + diff.Type = DiffTypeAdded + } else if len(new) == 0 { + diff.Type = DiffTypeDeleted + } else { + diff.Type = DiffTypeEdited + } + + // Diff the primitive fields. + oldPrimitiveFlat := flatmap.Flatten(old, nil, false) + newPrimitiveFlat := flatmap.Flatten(new, nil, false) + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + return diff +} + +// ObjectDiff contains the diff of two generic objects. +type ObjectDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff +} + +func (o *ObjectDiff) GoString() string { + out := fmt.Sprintf("\n%q (%s) {\n", o.Name, o.Type) + for _, f := range o.Fields { + out += fmt.Sprintf("%#v\n", f) + } + for _, o := range o.Objects { + out += fmt.Sprintf("%#v\n", o) + } + out += "}" + return out +} + +func (o *ObjectDiff) Less(other *ObjectDiff) bool { + if reflect.DeepEqual(o, other) { + return false + } else if other == nil { + return false + } else if o == nil { + return true + } + + if o.Name != other.Name { + return o.Name < other.Name + } + + if o.Type != other.Type { + return o.Type.Less(other.Type) + } + + if lO, lOther := len(o.Fields), len(other.Fields); lO != lOther { + return lO < lOther + } + + if lO, lOther := len(o.Objects), len(other.Objects); lO != lOther { + return lO < lOther + } + + // Check each field + sort.Sort(FieldDiffs(o.Fields)) + sort.Sort(FieldDiffs(other.Fields)) + + for i, oV := range o.Fields { + if oV.Less(other.Fields[i]) { + return true + } + } + + // Check each object + sort.Sort(ObjectDiffs(o.Objects)) + sort.Sort(ObjectDiffs(other.Objects)) + for i, oV := range o.Objects { + if oV.Less(other.Objects[i]) { + return true + } + } + + return false +} + +// For sorting ObjectDiffs +type ObjectDiffs []*ObjectDiff + +func (o ObjectDiffs) Len() int { return len(o) } +func (o ObjectDiffs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o ObjectDiffs) Less(i, j int) bool { return o[i].Less(o[j]) } + +type FieldDiff struct { + Type DiffType + Name string + Old, New string + Annotations []string +} + +// fieldDiff returns a FieldDiff if old and new are different otherwise, it +// returns nil. If contextual diff is enabled, even non-changed fields will be +// returned. 
+func fieldDiff(old, new, name string, contextual bool) *FieldDiff { + diff := &FieldDiff{Name: name, Type: DiffTypeNone} + if old == new { + if !contextual { + return nil + } + diff.Old, diff.New = old, new + return diff + } + + if old == "" { + diff.Type = DiffTypeAdded + diff.New = new + } else if new == "" { + diff.Type = DiffTypeDeleted + diff.Old = old + } else { + diff.Type = DiffTypeEdited + diff.Old = old + diff.New = new + } + return diff +} + +func (f *FieldDiff) GoString() string { + out := fmt.Sprintf("%q (%s): %q => %q", f.Name, f.Type, f.Old, f.New) + if len(f.Annotations) != 0 { + out += fmt.Sprintf(" (%s)", strings.Join(f.Annotations, ", ")) + } + + return out +} + +func (f *FieldDiff) Less(other *FieldDiff) bool { + if reflect.DeepEqual(f, other) { + return false + } else if other == nil { + return false + } else if f == nil { + return true + } + + if f.Name != other.Name { + return f.Name < other.Name + } else if f.Old != other.Old { + return f.Old < other.Old + } + + return f.New < other.New +} + +// For sorting FieldDiffs +type FieldDiffs []*FieldDiff + +func (f FieldDiffs) Len() int { return len(f) } +func (f FieldDiffs) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f FieldDiffs) Less(i, j int) bool { return f[i].Less(f[j]) } + +// fieldDiffs takes a map of field names to their values and returns a set of +// field diffs. If contextual diff is enabled, even non-changed fields will be +// returned. +func fieldDiffs(old, new map[string]string, contextual bool) []*FieldDiff { + var diffs []*FieldDiff + visited := make(map[string]struct{}) + for k, oldV := range old { + visited[k] = struct{}{} + newV := new[k] + if diff := fieldDiff(oldV, newV, k, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + + for k, newV := range new { + if _, ok := visited[k]; !ok { + if diff := fieldDiff("", newV, k, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(FieldDiffs(diffs)) + return diffs +} + +// stringSetDiff diffs two sets of strings with the given name. +func stringSetDiff(old, new []string, name string, contextual bool) *ObjectDiff { + oldMap := make(map[string]struct{}, len(old)) + newMap := make(map[string]struct{}, len(new)) + for _, o := range old { + oldMap[o] = struct{}{} + } + for _, n := range new { + newMap[n] = struct{}{} + } + if reflect.DeepEqual(oldMap, newMap) && !contextual { + return nil + } + + diff := &ObjectDiff{Name: name} + var added, removed bool + for k := range oldMap { + if _, ok := newMap[k]; !ok { + diff.Fields = append(diff.Fields, fieldDiff(k, "", name, contextual)) + removed = true + } else if contextual { + diff.Fields = append(diff.Fields, fieldDiff(k, k, name, contextual)) + } + } + + for k := range newMap { + if _, ok := oldMap[k]; !ok { + diff.Fields = append(diff.Fields, fieldDiff("", k, name, contextual)) + added = true + } + } + + sort.Sort(FieldDiffs(diff.Fields)) + + // Determine the type + if added && removed { + diff.Type = DiffTypeEdited + } else if added { + diff.Type = DiffTypeAdded + } else if removed { + diff.Type = DiffTypeDeleted + } else { + // Diff of an empty set + if len(diff.Fields) == 0 { + return nil + } + + diff.Type = DiffTypeNone + } + + return diff +} + +// primitiveObjectDiff returns a diff of the passed objects' primitive fields. +// The filter field can be used to exclude fields from the diff. The name is the +// name of the objects. If contextual is set, non-changed fields will also be +// stored in the object diff. 
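// --- Editorial aside: illustrative sketch, not part of the vendored file. ---
// fieldDiff/fieldDiffs above reduce two flattened string maps to per-field
// Added/Deleted/Edited records, and unchanged fields are only kept when the
// contextual flag is set. The classification rule in isolation (hypothetical
// helper that returns the DiffType name as a string):
func exampleClassifyField(old, new string) string {
	switch {
	case old == new:
		return "None" // dropped unless a contextual diff was requested
	case old == "":
		return "Added"
	case new == "":
		return "Deleted"
	default:
		return "Edited"
	}
}
// e.g. exampleClassifyField("", "3") -> "Added", exampleClassifyField("2", "3") -> "Edited"
// --- End editorial aside. ---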
+func primitiveObjectDiff(old, new interface{}, filter []string, name string, contextual bool) *ObjectDiff { + oldPrimitiveFlat := flatmap.Flatten(old, filter, true) + newPrimitiveFlat := flatmap.Flatten(new, filter, true) + delete(oldPrimitiveFlat, "") + delete(newPrimitiveFlat, "") + + diff := &ObjectDiff{Name: name} + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + var added, deleted, edited bool + for _, f := range diff.Fields { + switch f.Type { + case DiffTypeEdited: + edited = true + break + case DiffTypeDeleted: + deleted = true + case DiffTypeAdded: + added = true + } + } + + if edited || added && deleted { + diff.Type = DiffTypeEdited + } else if added { + diff.Type = DiffTypeAdded + } else if deleted { + diff.Type = DiffTypeDeleted + } else { + return nil + } + + return diff +} + +// primitiveObjectSetDiff does a set difference of the old and new sets. The +// filter parameter can be used to filter a set of primitive fields in the +// passed structs. The name corresponds to the name of the passed objects. If +// contextual diff is enabled, objects' primitive fields will be returned even if +// no diff exists. +func primitiveObjectSetDiff(old, new []interface{}, filter []string, name string, contextual bool) []*ObjectDiff { + makeSet := func(objects []interface{}) map[string]interface{} { + objMap := make(map[string]interface{}, len(objects)) + for _, obj := range objects { + hash, err := hashstructure.Hash(obj, nil) + if err != nil { + panic(err) + } + objMap[fmt.Sprintf("%d", hash)] = obj + } + + return objMap + } + + oldSet := makeSet(old) + newSet := makeSet(new) + + var diffs []*ObjectDiff + for k, v := range oldSet { + // Deleted + if _, ok := newSet[k]; !ok { + diffs = append(diffs, primitiveObjectDiff(v, nil, filter, name, contextual)) + } + } + for k, v := range newSet { + // Added + if _, ok := oldSet[k]; !ok { + diffs = append(diffs, primitiveObjectDiff(nil, v, filter, name, contextual)) + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs +} + +// interfaceSlice is a helper method that takes a slice of typed elements and +// returns a slice of interface. This method will panic if given a non-slice +// input. +func interfaceSlice(slice interface{}) []interface{} { + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + panic("InterfaceSlice() given a non-slice type") + } + + ret := make([]interface{}, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/errors.go b/vendor/github.com/hashicorp/nomad/nomad/structs/errors.go new file mode 100644 index 0000000..f164612 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/errors.go @@ -0,0 +1,142 @@ +package structs + +import ( + "errors" + "fmt" + "strings" +) + +const ( + errNoLeader = "No cluster leader" + errNoRegionPath = "No path to region" + errTokenNotFound = "ACL token not found" + errPermissionDenied = "Permission denied" + errNoNodeConn = "No path to node" + errUnknownMethod = "Unknown rpc method" + errUnknownNomadVersion = "Unable to determine Nomad version" + errNodeLacksRpc = "Node does not support RPC; requires 0.8 or later" + + // Prefix based errors that are used to check if the error is of a given + // type. These errors should be created with the associated constructor. 
+	ErrUnknownAllocationPrefix = "Unknown allocation"
+	ErrUnknownNodePrefix       = "Unknown node"
+	ErrUnknownJobPrefix        = "Unknown job"
+	ErrUnknownEvaluationPrefix = "Unknown evaluation"
+	ErrUnknownDeploymentPrefix = "Unknown deployment"
+)
+
+var (
+	ErrNoLeader            = errors.New(errNoLeader)
+	ErrNoRegionPath        = errors.New(errNoRegionPath)
+	ErrTokenNotFound       = errors.New(errTokenNotFound)
+	ErrPermissionDenied    = errors.New(errPermissionDenied)
+	ErrNoNodeConn          = errors.New(errNoNodeConn)
+	ErrUnknownMethod       = errors.New(errUnknownMethod)
+	ErrUnknownNomadVersion = errors.New(errUnknownNomadVersion)
+	ErrNodeLacksRpc        = errors.New(errNodeLacksRpc)
+)
+
+// IsErrNoLeader returns whether the error is due to there being no leader.
+func IsErrNoLeader(err error) bool {
+	return err != nil && strings.Contains(err.Error(), errNoLeader)
+}
+
+// IsErrNoRegionPath returns whether the error is due to there being no path to
+// the given region.
+func IsErrNoRegionPath(err error) bool {
+	return err != nil && strings.Contains(err.Error(), errNoRegionPath)
+}
+
+// IsErrTokenNotFound returns whether the error is due to the passed token not
+// being resolvable.
+func IsErrTokenNotFound(err error) bool {
+	return err != nil && strings.Contains(err.Error(), errTokenNotFound)
+}
+
+// IsErrPermissionDenied returns whether the error is due to the operation not
+// being allowed due to lack of permissions.
+func IsErrPermissionDenied(err error) bool {
+	return err != nil && strings.Contains(err.Error(), errPermissionDenied)
+}
+
+// IsErrNoNodeConn returns whether the error is due to there being no path to
+// the given node.
+func IsErrNoNodeConn(err error) bool {
+	return err != nil && strings.Contains(err.Error(), errNoNodeConn)
+}
+
+// IsErrUnknownMethod returns whether the error is due to the RPC method being
+// unknown.
+func IsErrUnknownMethod(err error) bool {
+	return err != nil && strings.Contains(err.Error(), errUnknownMethod)
+}
+
+// NewErrUnknownAllocation returns a new error caused by the allocation being
+// unknown.
+func NewErrUnknownAllocation(allocID string) error {
+	return fmt.Errorf("%s %q", ErrUnknownAllocationPrefix, allocID)
+}
+
+// NewErrUnknownNode returns a new error caused by the node being unknown.
+func NewErrUnknownNode(nodeID string) error {
+	return fmt.Errorf("%s %q", ErrUnknownNodePrefix, nodeID)
+}
+
+// NewErrUnknownJob returns a new error caused by the job being unknown.
+func NewErrUnknownJob(jobID string) error {
+	return fmt.Errorf("%s %q", ErrUnknownJobPrefix, jobID)
+}
+
+// NewErrUnknownEvaluation returns a new error caused by the evaluation being
+// unknown.
+func NewErrUnknownEvaluation(evaluationID string) error {
+	return fmt.Errorf("%s %q", ErrUnknownEvaluationPrefix, evaluationID)
+}
+
+// NewErrUnknownDeployment returns a new error caused by the deployment being
+// unknown.
+func NewErrUnknownDeployment(deploymentID string) error {
+	return fmt.Errorf("%s %q", ErrUnknownDeploymentPrefix, deploymentID)
+}
+
+// IsErrUnknownAllocation returns whether the error is due to an unknown
+// allocation.
+func IsErrUnknownAllocation(err error) bool {
+	return err != nil && strings.Contains(err.Error(), ErrUnknownAllocationPrefix)
+}
+
+// IsErrUnknownNode returns whether the error is due to an unknown
+// node.
+func IsErrUnknownNode(err error) bool {
+	return err != nil && strings.Contains(err.Error(), ErrUnknownNodePrefix)
+}
+
+// IsErrUnknownJob returns whether the error is due to an unknown
+// job.
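// --- Editorial aside: illustrative sketch, not part of the vendored file. ---
// The constructors and Is* helpers above encode error kinds as well-known
// message prefixes so detection still works after an error has crossed an RPC
// boundary as a plain string. The pattern in isolation (hypothetical helper;
// assumes fmt and strings are imported):
func examplePrefixError() bool {
	const unknownJobPrefix = "Unknown job" // stands in for ErrUnknownJobPrefix
	err := fmt.Errorf("%s %q", unknownJobPrefix, "example-job")
	// strings.Contains, rather than errors.Is, is what keeps this working on
	// errors that have been flattened to text.
	return err != nil && strings.Contains(err.Error(), unknownJobPrefix) // -> true
}
// --- End editorial aside. ---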
+func IsErrUnknownJob(err error) bool { + return err != nil && strings.Contains(err.Error(), ErrUnknownJobPrefix) +} + +// IsErrUnknownEvaluation returns whether the error is due to an unknown +// evaluation. +func IsErrUnknownEvaluation(err error) bool { + return err != nil && strings.Contains(err.Error(), ErrUnknownEvaluationPrefix) +} + +// IsErrUnknownDeployment returns whether the error is due to an unknown +// deployment. +func IsErrUnknownDeployment(err error) bool { + return err != nil && strings.Contains(err.Error(), ErrUnknownDeploymentPrefix) +} + +// IsErrUnknownNomadVersion returns whether the error is due to Nomad being +// unable to determine the version of a node. +func IsErrUnknownNomadVersion(err error) bool { + return err != nil && strings.Contains(err.Error(), errUnknownNomadVersion) +} + +// IsErrNodeLacksRpc returns whether error is due to a Nomad server being +// unable to connect to a client node because the client is too old (pre-v0.8). +func IsErrNodeLacksRpc(err error) bool { + return err != nil && strings.Contains(err.Error(), errNodeLacksRpc) +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go new file mode 100644 index 0000000..f79cb6c --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go @@ -0,0 +1,431 @@ +package structs + +import ( + "crypto/subtle" + "encoding/base64" + "encoding/binary" + "fmt" + "math" + "sort" + "strconv" + "strings" + + multierror "github.com/hashicorp/go-multierror" + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/nomad/acl" + "golang.org/x/crypto/blake2b" +) + +// MergeMultierrorWarnings takes job warnings and canonicalize warnings and +// merges them into a returnable string. Both the errors may be nil. +func MergeMultierrorWarnings(warnings ...error) string { + var warningMsg multierror.Error + for _, warn := range warnings { + if warn != nil { + multierror.Append(&warningMsg, warn) + } + } + + if len(warningMsg.Errors) == 0 { + return "" + } + + // Set the formatter + warningMsg.ErrorFormat = warningsFormatter + return warningMsg.Error() +} + +// warningsFormatter is used to format job warnings +func warningsFormatter(es []error) string { + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d warning(s):\n\n%s", + len(es), strings.Join(points, "\n")) +} + +// RemoveAllocs is used to remove any allocs with the given IDs +// from the list of allocations +func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation { + // Convert remove into a set + removeSet := make(map[string]struct{}) + for _, remove := range remove { + removeSet[remove.ID] = struct{}{} + } + + n := len(alloc) + for i := 0; i < n; i++ { + if _, ok := removeSet[alloc[i].ID]; ok { + alloc[i], alloc[n-1] = alloc[n-1], nil + i-- + n-- + } + } + + alloc = alloc[:n] + return alloc +} + +// FilterTerminalAllocs filters out all allocations in a terminal state and +// returns the latest terminal allocations +func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) { + terminalAllocsByName := make(map[string]*Allocation) + n := len(allocs) + for i := 0; i < n; i++ { + if allocs[i].TerminalStatus() { + + // Add the allocation to the terminal allocs map if it's not already + // added or has a higher create index than the one which is + // currently present. 
+ alloc, ok := terminalAllocsByName[allocs[i].Name] + if !ok || alloc.CreateIndex < allocs[i].CreateIndex { + terminalAllocsByName[allocs[i].Name] = allocs[i] + } + + // Remove the allocation + allocs[i], allocs[n-1] = allocs[n-1], nil + i-- + n-- + } + } + return allocs[:n], terminalAllocsByName +} + +// AllocsFit checks if a given set of allocations will fit on a node. +// The netIdx can optionally be provided if its already been computed. +// If the netIdx is provided, it is assumed that the client has already +// ensured there are no collisions. If checkDevices is set to true, we check if +// there is a device oversubscription. +func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex, checkDevices bool) (bool, string, *ComparableResources, error) { + // Compute the utilization from zero + used := new(ComparableResources) + + // Add the reserved resources of the node + used.Add(node.ComparableReservedResources()) + + // For each alloc, add the resources + for _, alloc := range allocs { + // Do not consider the resource impact of terminal allocations + if alloc.TerminalStatus() { + continue + } + + used.Add(alloc.ComparableResources()) + } + + // Check that the node resources are a super set of those + // that are being allocated + if superset, dimension := node.ComparableResources().Superset(used); !superset { + return false, dimension, used, nil + } + + // Create the network index if missing + if netIdx == nil { + netIdx = NewNetworkIndex() + defer netIdx.Release() + if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) { + return false, "reserved port collision", used, nil + } + } + + // Check if the network is overcommitted + if netIdx.Overcommitted() { + return false, "bandwidth exceeded", used, nil + } + + // Check devices + if checkDevices { + accounter := NewDeviceAccounter(node) + if accounter.AddAllocs(allocs) { + return false, "device oversubscribed", used, nil + } + } + + // Allocations fit! + return true, "", used, nil +} + +// ScoreFit is used to score the fit based on the Google work published here: +// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt +// This is equivalent to their BestFit v3 +func ScoreFit(node *Node, util *ComparableResources) float64 { + // COMPAT(0.11): Remove in 0.11 + reserved := node.ComparableReservedResources() + res := node.ComparableResources() + + // Determine the node availability + nodeCpu := float64(res.Flattened.Cpu.CpuShares) + nodeMem := float64(res.Flattened.Memory.MemoryMB) + if reserved != nil { + nodeCpu -= float64(reserved.Flattened.Cpu.CpuShares) + nodeMem -= float64(reserved.Flattened.Memory.MemoryMB) + } + + // Compute the free percentage + freePctCpu := 1 - (float64(util.Flattened.Cpu.CpuShares) / nodeCpu) + freePctRam := 1 - (float64(util.Flattened.Memory.MemoryMB) / nodeMem) + + // Total will be "maximized" the smaller the value is. + // At 100% utilization, the total is 2, while at 0% util it is 20. + total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam) + + // Invert so that the "maximized" total represents a high-value + // score. Because the floor is 20, we simply use that as an anchor. + // This means at a perfect fit, we return 18 as the score. + score := 20.0 - total + + // Bound the score, just in case + // If the score is over 18, that means we've overfit the node. 
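// (Editorial aside, a worked example of the formula above, not part of the
// vendored file: with 50% of CPU and 50% of memory free,
// total = 10^0.5 + 10^0.5 ≈ 6.32 and score ≈ 13.68; an idle node yields
// total = 20 and score 0, while a perfectly packed node yields total = 2 and
// score 18, so higher scores reward tighter bin packing.)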
+ if score > 18.0 { + score = 18.0 + } else if score < 0 { + score = 0 + } + return score +} + +func CopySliceConstraints(s []*Constraint) []*Constraint { + l := len(s) + if l == 0 { + return nil + } + + c := make([]*Constraint, l) + for i, v := range s { + c[i] = v.Copy() + } + return c +} + +func CopySliceAffinities(s []*Affinity) []*Affinity { + l := len(s) + if l == 0 { + return nil + } + + c := make([]*Affinity, l) + for i, v := range s { + c[i] = v.Copy() + } + return c +} + +func CopySliceSpreads(s []*Spread) []*Spread { + l := len(s) + if l == 0 { + return nil + } + + c := make([]*Spread, l) + for i, v := range s { + c[i] = v.Copy() + } + return c +} + +func CopySliceSpreadTarget(s []*SpreadTarget) []*SpreadTarget { + l := len(s) + if l == 0 { + return nil + } + + c := make([]*SpreadTarget, l) + for i, v := range s { + c[i] = v.Copy() + } + return c +} + +func CopySliceNodeScoreMeta(s []*NodeScoreMeta) []*NodeScoreMeta { + l := len(s) + if l == 0 { + return nil + } + + c := make([]*NodeScoreMeta, l) + for i, v := range s { + c[i] = v.Copy() + } + return c +} + +// VaultPoliciesSet takes the structure returned by VaultPolicies and returns +// the set of required policies +func VaultPoliciesSet(policies map[string]map[string]*Vault) []string { + set := make(map[string]struct{}) + + for _, tgp := range policies { + for _, tp := range tgp { + for _, p := range tp.Policies { + set[p] = struct{}{} + } + } + } + + flattened := make([]string, 0, len(set)) + for p := range set { + flattened = append(flattened, p) + } + return flattened +} + +// DenormalizeAllocationJobs is used to attach a job to all allocations that are +// non-terminal and do not have a job already. This is useful in cases where the +// job is normalized. +func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) { + if job != nil { + for _, alloc := range allocs { + if alloc.Job == nil && !alloc.TerminalStatus() { + alloc.Job = job + } + } + } +} + +// AllocName returns the name of the allocation given the input. +func AllocName(job, group string, idx uint) string { + return fmt.Sprintf("%s.%s[%d]", job, group, idx) +} + +// ACLPolicyListHash returns a consistent hash for a set of policies. 
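// --- Editorial aside: illustrative sketch, not part of the vendored file. ---
// ACLPolicyListHash below derives a cache key that changes whenever any policy
// in the set is added, removed, or modified, because every policy contributes
// its Name and ModifyIndex to the digest, and CompileACLObject sorts the set
// first so the key is order-independent. The same idea using only the standard
// library (the vendored code uses blake2b; the helper and its imports --
// crypto/sha256, encoding/binary, sort -- are assumptions):
func examplePolicyCacheKey(modifyIndexByName map[string]uint64) string {
	names := make([]string, 0, len(modifyIndexByName))
	for name := range modifyIndexByName {
		names = append(names, name)
	}
	sort.Strings(names) // stable ordering -> stable key for the same set
	h := sha256.New()
	for _, name := range names {
		h.Write([]byte(name))
		binary.Write(h, binary.BigEndian, modifyIndexByName[name]) // stands in for ModifyIndex
	}
	return string(h.Sum(nil))
}
// --- End editorial aside. ---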
+func ACLPolicyListHash(policies []*ACLPolicy) string { + cacheKeyHash, err := blake2b.New256(nil) + if err != nil { + panic(err) + } + for _, policy := range policies { + cacheKeyHash.Write([]byte(policy.Name)) + binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex) + } + cacheKey := string(cacheKeyHash.Sum(nil)) + return cacheKey +} + +// CompileACLObject compiles a set of ACL policies into an ACL object with a cache +func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) { + // Sort the policies to ensure consistent ordering + sort.Slice(policies, func(i, j int) bool { + return policies[i].Name < policies[j].Name + }) + + // Determine the cache key + cacheKey := ACLPolicyListHash(policies) + aclRaw, ok := cache.Get(cacheKey) + if ok { + return aclRaw.(*acl.ACL), nil + } + + // Parse the policies + parsed := make([]*acl.Policy, 0, len(policies)) + for _, policy := range policies { + p, err := acl.Parse(policy.Rules) + if err != nil { + return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err) + } + parsed = append(parsed, p) + } + + // Create the ACL object + aclObj, err := acl.NewACL(false, parsed) + if err != nil { + return nil, fmt.Errorf("failed to construct ACL: %v", err) + } + + // Update the cache + cache.Add(cacheKey, aclObj) + return aclObj, nil +} + +// GenerateMigrateToken will create a token for a client to access an +// authenticated volume of another client to migrate data for sticky volumes. +func GenerateMigrateToken(allocID, nodeSecretID string) (string, error) { + h, err := blake2b.New512([]byte(nodeSecretID)) + if err != nil { + return "", err + } + h.Write([]byte(allocID)) + return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil +} + +// CompareMigrateToken returns true if two migration tokens can be computed and +// are equal. +func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool { + h, err := blake2b.New512([]byte(nodeSecretID)) + if err != nil { + return false + } + h.Write([]byte(allocID)) + + otherBytes, err := base64.URLEncoding.DecodeString(otherMigrateToken) + if err != nil { + return false + } + return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1 +} + +// ParsePortRanges parses the passed port range string and returns a list of the +// ports. The specification is a comma separated list of either port numbers or +// port ranges. A port number is a single integer and a port range is two +// integers separated by a hyphen. 
As an example the following spec would +// convert to: ParsePortRanges("10,12-14,16") -> []uint64{10, 12, 13, 14, 16} +func ParsePortRanges(spec string) ([]uint64, error) { + parts := strings.Split(spec, ",") + + // Hot path the empty case + if len(parts) == 1 && parts[0] == "" { + return nil, nil + } + + ports := make(map[uint64]struct{}) + for _, part := range parts { + part = strings.TrimSpace(part) + rangeParts := strings.Split(part, "-") + l := len(rangeParts) + switch l { + case 1: + if val := rangeParts[0]; val == "" { + return nil, fmt.Errorf("can't specify empty port") + } else { + port, err := strconv.ParseUint(val, 10, 0) + if err != nil { + return nil, err + } + ports[port] = struct{}{} + } + case 2: + // We are parsing a range + start, err := strconv.ParseUint(rangeParts[0], 10, 0) + if err != nil { + return nil, err + } + + end, err := strconv.ParseUint(rangeParts[1], 10, 0) + if err != nil { + return nil, err + } + + if end < start { + return nil, fmt.Errorf("invalid range: starting value (%v) less than ending (%v) value", end, start) + } + + for i := start; i <= end; i++ { + ports[i] = struct{}{} + } + default: + return nil, fmt.Errorf("can only parse single port numbers or port ranges (ex. 80,100-120,150)") + } + } + + var results []uint64 + for port := range ports { + results = append(results, port) + } + + sort.Slice(results, func(i, j int) bool { + return results[i] < results[j] + }) + return results, nil +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/generate.sh b/vendor/github.com/hashicorp/nomad/nomad/structs/generate.sh new file mode 100644 index 0000000..34147c9 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/generate.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -e + +FILES="$(ls *[!_test].go | tr '\n' ' ')" +codecgen -d 100 -o structs.generated.go ${FILES} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go new file mode 100644 index 0000000..aaa81b6 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go @@ -0,0 +1,401 @@ +package structs + +import ( + "fmt" + "math/rand" + "net" + "sync" +) + +const ( + // MinDynamicPort is the smallest dynamic port generated + MinDynamicPort = 20000 + + // MaxDynamicPort is the largest dynamic port generated + MaxDynamicPort = 32000 + + // maxRandPortAttempts is the maximum number of attempt + // to assign a random port + maxRandPortAttempts = 20 + + // maxValidPort is the max valid port number + maxValidPort = 65536 +) + +var ( + // bitmapPool is used to pool the bitmaps used for port collision + // checking. They are fairly large (8K) so we can re-use them to + // avoid GC pressure. Care should be taken to call Clear() on any + // bitmap coming from the pool. 
+ bitmapPool = new(sync.Pool) +) + +// NetworkIndex is used to index the available network resources +// and the used network resources on a machine given allocations +type NetworkIndex struct { + AvailNetworks []*NetworkResource // List of available networks + AvailBandwidth map[string]int // Bandwidth by device + UsedPorts map[string]Bitmap // Ports by IP + UsedBandwidth map[string]int // Bandwidth by device +} + +// NewNetworkIndex is used to construct a new network index +func NewNetworkIndex() *NetworkIndex { + return &NetworkIndex{ + AvailBandwidth: make(map[string]int), + UsedPorts: make(map[string]Bitmap), + UsedBandwidth: make(map[string]int), + } +} + +// Release is called when the network index is no longer needed +// to attempt to re-use some of the memory it has allocated +func (idx *NetworkIndex) Release() { + for _, b := range idx.UsedPorts { + bitmapPool.Put(b) + } +} + +// Overcommitted checks if the network is overcommitted +func (idx *NetworkIndex) Overcommitted() bool { + for device, used := range idx.UsedBandwidth { + avail := idx.AvailBandwidth[device] + if used > avail { + return true + } + } + return false +} + +// SetNode is used to setup the available network resources. Returns +// true if there is a collision +func (idx *NetworkIndex) SetNode(node *Node) (collide bool) { + + // COMPAT(0.11): Remove in 0.11 + // Grab the network resources, handling both new and old + var networks []*NetworkResource + if node.NodeResources != nil && len(node.NodeResources.Networks) != 0 { + networks = node.NodeResources.Networks + } else if node.Resources != nil { + networks = node.Resources.Networks + } + + // Add the available CIDR blocks + for _, n := range networks { + if n.Device != "" { + idx.AvailNetworks = append(idx.AvailNetworks, n) + idx.AvailBandwidth[n.Device] = n.MBits + } + } + + // COMPAT(0.11): Remove in 0.11 + // Handle reserving ports, handling both new and old + if node.ReservedResources != nil && node.ReservedResources.Networks.ReservedHostPorts != "" { + collide = idx.AddReservedPortRange(node.ReservedResources.Networks.ReservedHostPorts) + } else if node.Reserved != nil { + for _, n := range node.Reserved.Networks { + if idx.AddReserved(n) { + collide = true + } + } + } + + return +} + +// AddAllocs is used to add the used network resources. 
Returns +// true if there is a collision +func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) { + for _, alloc := range allocs { + // Do not consider the resource impact of terminal allocations + if alloc.TerminalStatus() { + continue + } + + if alloc.AllocatedResources != nil { + for _, task := range alloc.AllocatedResources.Tasks { + if len(task.Networks) == 0 { + continue + } + n := task.Networks[0] + if idx.AddReserved(n) { + collide = true + } + } + } else { + // COMPAT(0.11): Remove in 0.11 + for _, task := range alloc.TaskResources { + if len(task.Networks) == 0 { + continue + } + n := task.Networks[0] + if idx.AddReserved(n) { + collide = true + } + } + } + } + return +} + +// AddReserved is used to add a reserved network usage, returns true +// if there is a port collision +func (idx *NetworkIndex) AddReserved(n *NetworkResource) (collide bool) { + // Add the port usage + used := idx.UsedPorts[n.IP] + if used == nil { + // Try to get a bitmap from the pool, else create + raw := bitmapPool.Get() + if raw != nil { + used = raw.(Bitmap) + used.Clear() + } else { + used, _ = NewBitmap(maxValidPort) + } + idx.UsedPorts[n.IP] = used + } + + for _, ports := range [][]Port{n.ReservedPorts, n.DynamicPorts} { + for _, port := range ports { + // Guard against invalid port + if port.Value < 0 || port.Value >= maxValidPort { + return true + } + if used.Check(uint(port.Value)) { + collide = true + } else { + used.Set(uint(port.Value)) + } + } + } + + // Add the bandwidth + idx.UsedBandwidth[n.Device] += n.MBits + return +} + +// AddReservedPortRange marks the ports given as reserved on all network +// interfaces. The port format is comma delimited, with spans given as n1-n2 +// (80,100-200,205) +func (idx *NetworkIndex) AddReservedPortRange(ports string) (collide bool) { + // Convert the ports into a slice of ints + resPorts, err := ParsePortRanges(ports) + if err != nil { + return + } + + // Ensure we create a bitmap for each available network + for _, n := range idx.AvailNetworks { + used := idx.UsedPorts[n.IP] + if used == nil { + // Try to get a bitmap from the pool, else create + raw := bitmapPool.Get() + if raw != nil { + used = raw.(Bitmap) + used.Clear() + } else { + used, _ = NewBitmap(maxValidPort) + } + idx.UsedPorts[n.IP] = used + } + } + + for _, used := range idx.UsedPorts { + for _, port := range resPorts { + // Guard against invalid port + if port < 0 || port >= maxValidPort { + return true + } + if used.Check(uint(port)) { + collide = true + } else { + used.Set(uint(port)) + } + } + } + + return +} + +// yieldIP is used to iteratively invoke the callback with +// an available IP +func (idx *NetworkIndex) yieldIP(cb func(net *NetworkResource, ip net.IP) bool) { + inc := func(ip net.IP) { + for j := len(ip) - 1; j >= 0; j-- { + ip[j]++ + if ip[j] > 0 { + break + } + } + } + + for _, n := range idx.AvailNetworks { + ip, ipnet, err := net.ParseCIDR(n.CIDR) + if err != nil { + continue + } + for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) { + if cb(n, ip) { + return + } + } + } +} + +// AssignNetwork is used to assign network resources given an ask. 
+// If the ask cannot be satisfied, returns nil +func (idx *NetworkIndex) AssignNetwork(ask *NetworkResource) (out *NetworkResource, err error) { + err = fmt.Errorf("no networks available") + idx.yieldIP(func(n *NetworkResource, ip net.IP) (stop bool) { + // Convert the IP to a string + ipStr := ip.String() + + // Check if we would exceed the bandwidth cap + availBandwidth := idx.AvailBandwidth[n.Device] + usedBandwidth := idx.UsedBandwidth[n.Device] + if usedBandwidth+ask.MBits > availBandwidth { + err = fmt.Errorf("bandwidth exceeded") + return + } + + used := idx.UsedPorts[ipStr] + + // Check if any of the reserved ports are in use + for _, port := range ask.ReservedPorts { + // Guard against invalid port + if port.Value < 0 || port.Value >= maxValidPort { + err = fmt.Errorf("invalid port %d (out of range)", port.Value) + return + } + + // Check if in use + if used != nil && used.Check(uint(port.Value)) { + err = fmt.Errorf("reserved port collision") + return + } + } + + // Create the offer + offer := &NetworkResource{ + Device: n.Device, + IP: ipStr, + MBits: ask.MBits, + ReservedPorts: ask.ReservedPorts, + DynamicPorts: ask.DynamicPorts, + } + + // Try to stochastically pick the dynamic ports as it is faster and + // lower memory usage. + var dynPorts []int + var dynErr error + dynPorts, dynErr = getDynamicPortsStochastic(used, ask) + if dynErr == nil { + goto BUILD_OFFER + } + + // Fall back to the precise method if the random sampling failed. + dynPorts, dynErr = getDynamicPortsPrecise(used, ask) + if dynErr != nil { + err = dynErr + return + } + + BUILD_OFFER: + for i, port := range dynPorts { + offer.DynamicPorts[i].Value = port + } + + // Stop, we have an offer! + out = offer + err = nil + return true + }) + return +} + +// getDynamicPortsPrecise takes the nodes used port bitmap which may be nil if +// no ports have been allocated yet, the network ask and returns a set of unused +// ports to fullfil the ask's DynamicPorts or an error if it failed. An error +// means the ask can not be satisfied as the method does a precise search. +func getDynamicPortsPrecise(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) { + // Create a copy of the used ports and apply the new reserves + var usedSet Bitmap + var err error + if nodeUsed != nil { + usedSet, err = nodeUsed.Copy() + if err != nil { + return nil, err + } + } else { + usedSet, err = NewBitmap(maxValidPort) + if err != nil { + return nil, err + } + } + + for _, port := range ask.ReservedPorts { + usedSet.Set(uint(port.Value)) + } + + // Get the indexes of the unset + availablePorts := usedSet.IndexesInRange(false, MinDynamicPort, MaxDynamicPort) + + // Randomize the amount we need + numDyn := len(ask.DynamicPorts) + if len(availablePorts) < numDyn { + return nil, fmt.Errorf("dynamic port selection failed") + } + + numAvailable := len(availablePorts) + for i := 0; i < numDyn; i++ { + j := rand.Intn(numAvailable) + availablePorts[i], availablePorts[j] = availablePorts[j], availablePorts[i] + } + + return availablePorts[:numDyn], nil +} + +// getDynamicPortsStochastic takes the nodes used port bitmap which may be nil if +// no ports have been allocated yet, the network ask and returns a set of unused +// ports to fullfil the ask's DynamicPorts or an error if it failed. An error +// does not mean the ask can not be satisfied as the method has a fixed amount +// of random probes and if these fail, the search is aborted. 
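// -----------------------------------------------------------------------------
// Editor's note: the sketch below is illustrative only and is not part of the
// vendored Nomad source. It strings together the NetworkIndex calls defined
// above to show the intended flow; the function name exampleAssignNetwork is
// hypothetical.
func exampleAssignNetwork(node *Node, existing []*Allocation, ask *NetworkResource) (*NetworkResource, error) {
	idx := NewNetworkIndex()
	defer idx.Release() // return pooled port bitmaps when finished

	// Index the node's available networks and reserved ports; true means the
	// node's own configuration already collides.
	if collide := idx.SetNode(node); collide {
		return nil, fmt.Errorf("node reserved ports collide")
	}

	// Account for ports and bandwidth held by existing, non-terminal allocations.
	if collide := idx.AddAllocs(existing); collide {
		return nil, fmt.Errorf("existing allocations collide on ports")
	}

	// AssignNetwork picks an IP, enforces the bandwidth cap and reserved-port
	// availability, and fills the ask's DynamicPorts (stochastic probing with
	// a precise bitmap fallback, as implemented above).
	return idx.AssignNetwork(ask)
}
// -----------------------------------------------------------------------------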
+func getDynamicPortsStochastic(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) {
+	var reserved, dynamic []int
+	for _, port := range ask.ReservedPorts {
+		reserved = append(reserved, port.Value)
+	}
+
+	for i := 0; i < len(ask.DynamicPorts); i++ {
+		attempts := 0
+	PICK:
+		attempts++
+		if attempts > maxRandPortAttempts {
+			return nil, fmt.Errorf("stochastic dynamic port selection failed")
+		}
+
+		randPort := MinDynamicPort + rand.Intn(MaxDynamicPort-MinDynamicPort)
+		if nodeUsed != nil && nodeUsed.Check(uint(randPort)) {
+			goto PICK
+		}
+
+		for _, ports := range [][]int{reserved, dynamic} {
+			if isPortReserved(ports, randPort) {
+				goto PICK
+			}
+		}
+		dynamic = append(dynamic, randPort)
+	}
+
+	return dynamic, nil
+}
+
+// isPortReserved scans an integer slice for a value
+func isPortReserved(haystack []int, needle int) bool {
+	for _, item := range haystack {
+		if item == needle {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/node.go b/vendor/github.com/hashicorp/nomad/nomad/structs/node.go
new file mode 100644
index 0000000..76758fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/nomad/structs/node.go
@@ -0,0 +1,62 @@
+package structs
+
+import (
+	"time"
+
+	"github.com/hashicorp/nomad/helper"
+)
+
+// DriverInfo is the current state of a single driver. This is updated
+// regularly as driver health changes on the node.
+type DriverInfo struct {
+	Attributes        map[string]string
+	Detected          bool
+	Healthy           bool
+	HealthDescription string
+	UpdateTime        time.Time
+}
+
+func (di *DriverInfo) Copy() *DriverInfo {
+	if di == nil {
+		return nil
+	}
+
+	cdi := new(DriverInfo)
+	*cdi = *di
+	cdi.Attributes = helper.CopyMapStringString(di.Attributes)
+	return cdi
+}
+
+// MergeHealthCheck merges information from a health check for a driver into a
+// node's driver info
+func (di *DriverInfo) MergeHealthCheck(other *DriverInfo) {
+	di.Healthy = other.Healthy
+	di.HealthDescription = other.HealthDescription
+	di.UpdateTime = other.UpdateTime
+}
+
+// MergeFingerprintInfo merges information from fingerprinting a node for a driver
+// into a node's driver info for that driver.
+func (di *DriverInfo) MergeFingerprintInfo(other *DriverInfo) {
+	di.Detected = other.Detected
+	di.Attributes = other.Attributes
+}
+
+// HealthCheckEquals determines if two driver info objects are equal. As this is used
+// in the process of health checking, we only check the fields that are
+// computed by the health checker. In the future, this will be merged.
+func (di *DriverInfo) HealthCheckEquals(other *DriverInfo) bool {
+	if di == nil && other == nil {
+		return true
+	}
+
+	if di.Healthy != other.Healthy {
+		return false
+	}
+
+	if di.HealthDescription != other.HealthDescription {
+		return false
+	}
+
+	return true
+}
diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go b/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go
new file mode 100644
index 0000000..fbeb939
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go
@@ -0,0 +1,132 @@
+package structs
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/mitchellh/hashstructure"
+)
+
+const (
+	// NodeUniqueNamespace is a prefix that can be appended to node meta or
+	// attribute keys to mark them for exclusion in computed node class.
+	NodeUniqueNamespace = "unique."
+)
+
+// UniqueNamespace takes a key and returns the key marked under the unique
+// namespace.
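// -----------------------------------------------------------------------------
// Editor's note: the sketch below is illustrative only and is not part of the
// vendored Nomad source. It shows the "unique." marker convention used when a
// node's computed class is derived; exampleUniqueKeys is a hypothetical name.
func exampleUniqueKeys() bool {
	// Mark a meta or attribute key as uniquely identifying this node.
	key := UniqueNamespace("hostname") // "unique.hostname"

	// HashIncludeMap skips keys in the unique namespace when ComputeClass
	// hashes the node, so nodes that differ only in such keys still share a
	// ComputedClass.
	return IsUniqueNamespace(key) // true
}
// -----------------------------------------------------------------------------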
+func UniqueNamespace(key string) string { + return fmt.Sprintf("%s%s", NodeUniqueNamespace, key) +} + +// IsUniqueNamespace returns whether the key is under the unique namespace. +func IsUniqueNamespace(key string) bool { + return strings.HasPrefix(key, NodeUniqueNamespace) +} + +// ComputeClass computes a derived class for the node based on its attributes. +// ComputedClass is a unique id that identifies nodes with a common set of +// attributes and capabilities. Thus, when calculating a node's computed class +// we avoid including any uniquely identifying fields. +func (n *Node) ComputeClass() error { + hash, err := hashstructure.Hash(n, nil) + if err != nil { + return err + } + + n.ComputedClass = fmt.Sprintf("v1:%d", hash) + return nil +} + +// HashInclude is used to blacklist uniquely identifying node fields from being +// included in the computed node class. +func (n Node) HashInclude(field string, v interface{}) (bool, error) { + switch field { + case "Datacenter", "Attributes", "Meta", "NodeClass", "NodeResources": + return true, nil + default: + return false, nil + } +} + +// HashIncludeMap is used to blacklist uniquely identifying node map keys from being +// included in the computed node class. +func (n Node) HashIncludeMap(field string, k, v interface{}) (bool, error) { + key, ok := k.(string) + if !ok { + return false, fmt.Errorf("map key %v not a string", k) + } + + switch field { + case "Meta", "Attributes": + return !IsUniqueNamespace(key), nil + default: + return false, fmt.Errorf("unexpected map field: %v", field) + } +} + +// HashInclude is used to blacklist uniquely identifying node fields from being +// included in the computed node class. +func (n NodeResources) HashInclude(field string, v interface{}) (bool, error) { + switch field { + case "Devices": + return true, nil + default: + return false, nil + } +} + +// HashInclude is used to blacklist uniquely identifying node fields from being +// included in the computed node class. +func (n NodeDeviceResource) HashInclude(field string, v interface{}) (bool, error) { + switch field { + case "Vendor", "Type", "Name", "Attributes": + return true, nil + default: + return false, nil + } +} + +// HashIncludeMap is used to blacklist uniquely identifying node map keys from being +// included in the computed node class. +func (n NodeDeviceResource) HashIncludeMap(field string, k, v interface{}) (bool, error) { + key, ok := k.(string) + if !ok { + return false, fmt.Errorf("map key %v not a string", k) + } + + switch field { + case "Attributes": + return !IsUniqueNamespace(key), nil + default: + return false, fmt.Errorf("unexpected map field: %v", field) + } +} + +// EscapedConstraints takes a set of constraints and returns the set that +// escapes computed node classes. +func EscapedConstraints(constraints []*Constraint) []*Constraint { + var escaped []*Constraint + for _, c := range constraints { + if constraintTargetEscapes(c.LTarget) || constraintTargetEscapes(c.RTarget) { + escaped = append(escaped, c) + } + } + + return escaped +} + +// constraintTargetEscapes returns whether the target of a constraint escapes +// computed node class optimization. 
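// -----------------------------------------------------------------------------
// Editor's note: the sketch below is illustrative only and is not part of the
// vendored Nomad source. It shows which constraints "escape" the computed
// node class optimization and therefore must be evaluated per node; the
// function name exampleEscapedConstraints and the field values are hypothetical
// (only LTarget/RTarget are set, other Constraint fields are elided).
func exampleEscapedConstraints() []*Constraint {
	constraints := []*Constraint{
		{LTarget: "${attr.kernel.name}", RTarget: "linux"},      // does not escape
		{LTarget: "${attr.unique.hostname}", RTarget: "web-01"}, // escapes
	}

	// Only the constraint targeting ${attr.unique.hostname} is returned.
	return EscapedConstraints(constraints)
}
// -----------------------------------------------------------------------------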
+func constraintTargetEscapes(target string) bool { + switch { + case strings.HasPrefix(target, "${node.unique."): + return true + case strings.HasPrefix(target, "${attr.unique."): + return true + case strings.HasPrefix(target, "${meta.unique."): + return true + default: + return false + } +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go new file mode 100644 index 0000000..cad46de --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go @@ -0,0 +1,169 @@ +package structs + +import ( + "time" + + "github.com/hashicorp/raft" +) + +// RaftServer has information about a server in the Raft configuration. +type RaftServer struct { + // ID is the unique ID for the server. These are currently the same + // as the address, but they will be changed to a real GUID in a future + // release of Nomad. + ID raft.ServerID + + // Node is the node name of the server, as known by Nomad, or this + // will be set to "(unknown)" otherwise. + Node string + + // Address is the IP:port of the server, used for Raft communications. + Address raft.ServerAddress + + // Leader is true if this server is the current cluster leader. + Leader bool + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online, or if + // it's a non-voting server, which will be added in a future release of + // Nomad. + Voter bool + + // RaftProtocol is the version of the Raft protocol spoken by this server. + RaftProtocol string +} + +// RaftConfigurationResponse is returned when querying for the current Raft +// configuration. +type RaftConfigurationResponse struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer + + // Index has the Raft index of this configuration. + Index uint64 +} + +// RaftPeerByAddressRequest is used by the Operator endpoint to apply a Raft +// operation on a specific Raft peer by address in the form of "IP:port". +type RaftPeerByAddressRequest struct { + // Address is the peer to remove, in the form "IP:port". + Address raft.ServerAddress + + // WriteRequest holds the Region for this request. + WriteRequest +} + +// RaftPeerByIDRequest is used by the Operator endpoint to apply a Raft +// operation on a specific Raft peer by ID. +type RaftPeerByIDRequest struct { + // ID is the peer ID to remove. + ID raft.ServerID + + // WriteRequest holds the Region for this request. + WriteRequest +} + +// AutopilotSetConfigRequest is used by the Operator endpoint to update the +// current Autopilot configuration of the cluster. +type AutopilotSetConfigRequest struct { + // Datacenter is the target this request is intended for. + Datacenter string + + // Config is the new Autopilot configuration to use. + Config AutopilotConfig + + // CAS controls whether to use check-and-set semantics for this request. + CAS bool + + // WriteRequest holds the ACL token to go along with this request. + WriteRequest +} + +// RequestDatacenter returns the datacenter for a given request. +func (op *AutopilotSetConfigRequest) RequestDatacenter() string { + return op.Datacenter +} + +// AutopilotConfig is the internal config for the Autopilot mechanism. +type AutopilotConfig struct { + // CleanupDeadServers controls whether to remove dead servers when a new + // server is added to the Raft peers. 
+ CleanupDeadServers bool + + // ServerStabilizationTime is the minimum amount of time a server must be + // in a stable, healthy state before it can be added to the cluster. Only + // applicable with Raft protocol version 3 or higher. + ServerStabilizationTime time.Duration + + // LastContactThreshold is the limit on the amount of time a server can go + // without leader contact before being considered unhealthy. + LastContactThreshold time.Duration + + // MaxTrailingLogs is the amount of entries in the Raft Log that a server can + // be behind before being considered unhealthy. + MaxTrailingLogs uint64 + + // (Enterprise-only) EnableRedundancyZones specifies whether to enable redundancy zones. + EnableRedundancyZones bool + + // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration + // strategy of waiting until enough newer-versioned servers have been added to the + // cluster before promoting them to voters. + DisableUpgradeMigration bool + + // (Enterprise-only) EnableCustomUpgrades specifies whether to enable using custom + // upgrade versions when performing migrations. + EnableCustomUpgrades bool + + // CreateIndex/ModifyIndex store the create/modify indexes of this configuration. + CreateIndex uint64 + ModifyIndex uint64 +} + +// SchedulerConfiguration is the config for controlling scheduler behavior +type SchedulerConfiguration struct { + // PreemptionConfig specifies whether to enable eviction of lower + // priority jobs to place higher priority jobs. + PreemptionConfig PreemptionConfig + + // CreateIndex/ModifyIndex store the create/modify indexes of this configuration. + CreateIndex uint64 + ModifyIndex uint64 +} + +// SchedulerConfigurationResponse is the response object that wraps SchedulerConfiguration +type SchedulerConfigurationResponse struct { + // SchedulerConfig contains scheduler config options + SchedulerConfig *SchedulerConfiguration + + QueryMeta +} + +// SchedulerSetConfigurationResponse is the response object used +// when updating scheduler configuration +type SchedulerSetConfigurationResponse struct { + // Updated returns whether the config was actually updated + // Only set when the request uses CAS + Updated bool + + WriteMeta +} + +// PreemptionConfig specifies whether preemption is enabled based on scheduler type +type PreemptionConfig struct { + // SystemSchedulerEnabled specifies if preemption is enabled for system jobs + SystemSchedulerEnabled bool +} + +// SchedulerSetConfigRequest is used by the Operator endpoint to update the +// current Scheduler configuration of the cluster. +type SchedulerSetConfigRequest struct { + // Config is the new Scheduler configuration to use. + Config SchedulerConfiguration + + // CAS controls whether to use check-and-set semantics for this request. + CAS bool + + // WriteRequest holds the ACL token to go along with this request. + WriteRequest +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/streaming_rpc.go b/vendor/github.com/hashicorp/nomad/nomad/structs/streaming_rpc.go new file mode 100644 index 0000000..42559d4 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/streaming_rpc.go @@ -0,0 +1,72 @@ +package structs + +import ( + "fmt" + "io" + "sync" +) + +// StreamingRpcHeader is the first struct serialized after entering the +// streaming RPC mode. The header is used to dispatch to the correct method. +type StreamingRpcHeader struct { + // Method is the name of the method to invoke. 
+ Method string +} + +// StreamingRpcAck is used to acknowledge receiving the StreamingRpcHeader and +// routing to the requested handler. +type StreamingRpcAck struct { + // Error is used to return whether an error occurred establishing the + // streaming RPC. This error occurs before entering the RPC handler. + Error string +} + +// StreamingRpcHandler defines the handler for a streaming RPC. +type StreamingRpcHandler func(conn io.ReadWriteCloser) + +// StreamingRpcRegistry is used to add and retrieve handlers +type StreamingRpcRegistry struct { + registry map[string]StreamingRpcHandler +} + +// NewStreamingRpcRegistry creates a new registry. All registrations of +// handlers should be done before retrieving handlers. +func NewStreamingRpcRegistry() *StreamingRpcRegistry { + return &StreamingRpcRegistry{ + registry: make(map[string]StreamingRpcHandler), + } +} + +// Register registers a new handler for the given method name +func (s *StreamingRpcRegistry) Register(method string, handler StreamingRpcHandler) { + s.registry[method] = handler +} + +// GetHandler returns a handler for the given method or an error if it doesn't exist. +func (s *StreamingRpcRegistry) GetHandler(method string) (StreamingRpcHandler, error) { + h, ok := s.registry[method] + if !ok { + return nil, fmt.Errorf("%s: %q", ErrUnknownMethod, method) + } + + return h, nil +} + +// Bridge is used to just link two connections together and copy traffic +func Bridge(a, b io.ReadWriteCloser) { + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + io.Copy(a, b) + a.Close() + b.Close() + }() + go func() { + defer wg.Done() + io.Copy(b, a) + a.Close() + b.Close() + }() + wg.Wait() +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.generated.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.generated.go new file mode 100644 index 0000000..092051f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.generated.go @@ -0,0 +1,8473 @@ +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package structs + +import ( + "errors" + "fmt" + pkg1_raft "github.com/hashicorp/raft" + codec1978 "github.com/ugorji/go/codec" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF8100 = 1 + codecSelferC_RAW100 = 0 + // ----- value types used ---- + codecSelferValueTypeArray100 = 10 + codecSelferValueTypeMap100 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey100 = 2 + codecSelfer_containerMapValue100 = 3 + codecSelfer_containerMapEnd100 = 4 + codecSelfer_containerArrayElem100 = 6 + codecSelfer_containerArrayEnd100 = 7 +) + +var ( + codecSelferBitsize100 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr100 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer100 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_raft.ServerID + var v1 time.Duration + _, _ = v0, v1 + } +} + +func (x Bitmap) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encBitmap((Bitmap)(x), e) + } + } +} + +func (x *Bitmap) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decBitmap((*Bitmap)(x), d) + } +} + +func (x DiffType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x)) + } +} + +func (x *DiffType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *JobDiff) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Type")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.ID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("ID")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.ID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Fields == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePtrtoFieldDiff(([]*FieldDiff)(x.Fields), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Fields")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Fields == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSlicePtrtoFieldDiff(([]*FieldDiff)(x.Fields), e) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Objects == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSlicePtrtoObjectDiff(([]*ObjectDiff)(x.Objects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Objects")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Objects == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSlicePtrtoObjectDiff(([]*ObjectDiff)(x.Objects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.TaskGroups == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePtrtoTaskGroupDiff(([]*TaskGroupDiff)(x.TaskGroups), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("TaskGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.TaskGroups == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSlicePtrtoTaskGroupDiff(([]*TaskGroupDiff)(x.TaskGroups), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *JobDiff) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *JobDiff) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "ID": + if r.TryDecodeAsNil() { + x.ID = "" + } else { + yyv5 := &x.ID + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "Fields": + if r.TryDecodeAsNil() { + x.Fields = nil + } else { + yyv7 := &x.Fields + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSlicePtrtoFieldDiff((*[]*FieldDiff)(yyv7), d) + } + } + case "Objects": + if r.TryDecodeAsNil() { + x.Objects = nil + } else { + yyv9 := &x.Objects + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePtrtoObjectDiff((*[]*ObjectDiff)(yyv9), d) + } + } + case "TaskGroups": + if 
r.TryDecodeAsNil() { + x.TaskGroups = nil + } else { + yyv11 := &x.TaskGroups + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSlicePtrtoTaskGroupDiff((*[]*TaskGroupDiff)(yyv11), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *JobDiff) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv14 := &x.Type + yyv14.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.ID = "" + } else { + yyv15 := &x.ID + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Fields = nil + } else { + yyv17 := &x.Fields + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSlicePtrtoFieldDiff((*[]*FieldDiff)(yyv17), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Objects = nil + } else { + yyv19 := &x.Objects + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePtrtoObjectDiff((*[]*ObjectDiff)(yyv19), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.TaskGroups = nil + } else { + yyv21 := &x.TaskGroups + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSlicePtrtoTaskGroupDiff((*[]*TaskGroupDiff)(yyv21), d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *TaskGroupDiff) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 6 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 
|| yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Type")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Name")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Fields == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePtrtoFieldDiff(([]*FieldDiff)(x.Fields), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Fields")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Fields == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSlicePtrtoFieldDiff(([]*FieldDiff)(x.Fields), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Objects == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSlicePtrtoObjectDiff(([]*ObjectDiff)(x.Objects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Objects")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Objects == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSlicePtrtoObjectDiff(([]*ObjectDiff)(x.Objects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Tasks == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePtrtoTaskDiff(([]*TaskDiff)(x.Tasks), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Tasks")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Tasks == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSlicePtrtoTaskDiff(([]*TaskDiff)(x.Tasks), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Updates == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncMapStringUint64V(x.Updates, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Updates")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Updates == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + z.F.EncMapStringUint64V(x.Updates, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *TaskGroupDiff) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *TaskGroupDiff) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "Name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv5 := &x.Name + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "Fields": + if r.TryDecodeAsNil() { + x.Fields = nil + } else { + yyv7 := &x.Fields + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSlicePtrtoFieldDiff((*[]*FieldDiff)(yyv7), d) + } + } + case "Objects": + if r.TryDecodeAsNil() { + x.Objects = nil + } else { + yyv9 := &x.Objects + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePtrtoObjectDiff((*[]*ObjectDiff)(yyv9), d) + } + } + case "Tasks": + if r.TryDecodeAsNil() { + x.Tasks = nil + } else { + yyv11 := &x.Tasks + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSlicePtrtoTaskDiff((*[]*TaskDiff)(yyv11), d) + } + } + case "Updates": + if r.TryDecodeAsNil() { + x.Updates = nil + } else { + yyv13 := &x.Updates + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecMapStringUint64X(yyv13, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *TaskGroupDiff) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv16 := &x.Type + yyv16.CodecDecodeSelf(d) + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv17 := &x.Name + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = 
r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Fields = nil + } else { + yyv19 := &x.Fields + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePtrtoFieldDiff((*[]*FieldDiff)(yyv19), d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Objects = nil + } else { + yyv21 := &x.Objects + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSlicePtrtoObjectDiff((*[]*ObjectDiff)(yyv21), d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Tasks = nil + } else { + yyv23 := &x.Tasks + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSlicePtrtoTaskDiff((*[]*TaskDiff)(yyv23), d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Updates = nil + } else { + yyv25 := &x.Updates + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecMapStringUint64X(yyv25, false, d) + } + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x TaskGroupDiffs) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encTaskGroupDiffs((TaskGroupDiffs)(x), e) + } + } +} + +func (x *TaskGroupDiffs) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decTaskGroupDiffs((*TaskGroupDiffs)(x), d) + } +} + +func (x *TaskDiff) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Type")) + 
z.EncSendContainerState(codecSelfer_containerMapValue100) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Name")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Fields == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePtrtoFieldDiff(([]*FieldDiff)(x.Fields), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Fields")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Fields == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSlicePtrtoFieldDiff(([]*FieldDiff)(x.Fields), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Objects == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSlicePtrtoObjectDiff(([]*ObjectDiff)(x.Objects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Objects")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Objects == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSlicePtrtoObjectDiff(([]*ObjectDiff)(x.Objects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Annotations == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.Annotations, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Annotations")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Annotations == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.Annotations, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *TaskDiff) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *TaskDiff) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "Name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv5 := &x.Name + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "Fields": + if r.TryDecodeAsNil() { + x.Fields = nil + } else { + yyv7 := &x.Fields + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSlicePtrtoFieldDiff((*[]*FieldDiff)(yyv7), d) + } + } + case "Objects": + if r.TryDecodeAsNil() { + x.Objects = nil + } else { + yyv9 := &x.Objects + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePtrtoObjectDiff((*[]*ObjectDiff)(yyv9), d) + } + } + case "Annotations": + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv11 := &x.Annotations + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecSliceStringX(yyv11, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *TaskDiff) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv14 := &x.Type + yyv14.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv15 := &x.Name + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Fields = nil + } else { + yyv17 := &x.Fields + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSlicePtrtoFieldDiff((*[]*FieldDiff)(yyv17), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Objects = nil + } else { + yyv19 := &x.Objects + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePtrtoObjectDiff((*[]*ObjectDiff)(yyv19), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv21 := &x.Annotations + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecSliceStringX(yyv21, false, d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x TaskDiffs) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encTaskDiffs((TaskDiffs)(x), e) + } + } +} + +func (x *TaskDiffs) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decTaskDiffs((*TaskDiffs)(x), d) + } +} + +func (x *ObjectDiff) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Type")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Name")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Fields == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePtrtoFieldDiff(([]*FieldDiff)(x.Fields), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Fields")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Fields == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSlicePtrtoFieldDiff(([]*FieldDiff)(x.Fields), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Objects == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSlicePtrtoObjectDiff(([]*ObjectDiff)(x.Objects), e) + } + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Objects")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Objects == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSlicePtrtoObjectDiff(([]*ObjectDiff)(x.Objects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *ObjectDiff) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *ObjectDiff) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "Name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv5 := &x.Name + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "Fields": + if r.TryDecodeAsNil() { + x.Fields = nil + } else { + yyv7 := &x.Fields + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSlicePtrtoFieldDiff((*[]*FieldDiff)(yyv7), d) + } + } + case "Objects": + if r.TryDecodeAsNil() { + x.Objects = nil + } else { + yyv9 := &x.Objects + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePtrtoObjectDiff((*[]*ObjectDiff)(yyv9), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *ObjectDiff) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv12 := &x.Type + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Fields = nil + } else { + yyv15 := &x.Fields + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSlicePtrtoFieldDiff((*[]*FieldDiff)(yyv15), d) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Objects = nil + } else { + yyv17 := &x.Objects + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSlicePtrtoObjectDiff((*[]*ObjectDiff)(yyv17), d) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x ObjectDiffs) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encObjectDiffs((ObjectDiffs)(x), e) + } + } +} + +func (x *ObjectDiffs) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decObjectDiffs((*ObjectDiffs)(x), d) + } +} + +func (x *FieldDiff) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Type")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Name")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Old)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Old")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Old)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.New)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("New")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.New)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Annotations == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.Annotations, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Annotations")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Annotations == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.Annotations, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *FieldDiff) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *FieldDiff) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "Name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv5 := &x.Name + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "Old": + if r.TryDecodeAsNil() { + x.Old = "" + } else { + yyv7 := &x.Old + yym8 := z.DecBinary() + _ 
= yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "New": + if r.TryDecodeAsNil() { + x.New = "" + } else { + yyv9 := &x.New + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "Annotations": + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv11 := &x.Annotations + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecSliceStringX(yyv11, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *FieldDiff) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv14 := &x.Type + yyv14.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv15 := &x.Name + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Old = "" + } else { + yyv17 := &x.Old + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.New = "" + } else { + yyv19 := &x.New + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv21 := &x.Annotations + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecSliceStringX(yyv21, false, d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x FieldDiffs) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encFieldDiffs((FieldDiffs)(x), e) + } + } +} + +func (x *FieldDiffs) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decFieldDiffs((*FieldDiffs)(x), d) + } +} + +func (x *NetworkIndex) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.AvailNetworks == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePtrtoNetworkResource(([]*NetworkResource)(x.AvailNetworks), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("AvailNetworks")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.AvailNetworks == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePtrtoNetworkResource(([]*NetworkResource)(x.AvailNetworks), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.AvailBandwidth == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringIntV(x.AvailBandwidth, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("AvailBandwidth")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.AvailBandwidth == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncMapStringIntV(x.AvailBandwidth, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.UsedPorts == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encMapstringBitmap((map[string]Bitmap)(x.UsedPorts), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("UsedPorts")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.UsedPorts == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encMapstringBitmap((map[string]Bitmap)(x.UsedPorts), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.UsedBandwidth == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringIntV(x.UsedBandwidth, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("UsedBandwidth")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.UsedBandwidth == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncMapStringIntV(x.UsedBandwidth, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *NetworkIndex) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *NetworkIndex) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "AvailNetworks": + if r.TryDecodeAsNil() { + x.AvailNetworks = nil + } else { + yyv4 := &x.AvailNetworks + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePtrtoNetworkResource((*[]*NetworkResource)(yyv4), d) + } + } + case "AvailBandwidth": + if r.TryDecodeAsNil() { + x.AvailBandwidth = nil + } else { + yyv6 := &x.AvailBandwidth + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringIntX(yyv6, false, d) + } + } + case "UsedPorts": + if r.TryDecodeAsNil() { + x.UsedPorts = nil + } else { + yyv8 := &x.UsedPorts + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decMapstringBitmap((*map[string]Bitmap)(yyv8), d) + } + } + case "UsedBandwidth": + if r.TryDecodeAsNil() { + x.UsedBandwidth = nil + } else { + yyv10 := &x.UsedBandwidth + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringIntX(yyv10, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *NetworkIndex) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.AvailNetworks = nil + } else { + yyv13 := &x.AvailNetworks + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePtrtoNetworkResource((*[]*NetworkResource)(yyv13), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.AvailBandwidth = nil + } else { + yyv15 := &x.AvailBandwidth + yym16 := z.DecBinary() + _ = yym16 + if false { + } 
else { + z.F.DecMapStringIntX(yyv15, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.UsedPorts = nil + } else { + yyv17 := &x.UsedPorts + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decMapstringBitmap((*map[string]Bitmap)(yyv17), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.UsedBandwidth = nil + } else { + yyv19 := &x.UsedBandwidth + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecMapStringIntX(yyv19, false, d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *RaftServer) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 6 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.ID) { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.ID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("ID")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.ID) { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.ID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Node)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Node")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Node)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Address) { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Address)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Address")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.Address) { + } else { + r.EncodeString(codecSelferC_UTF8100, 
string(x.Address)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Leader)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Leader")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Leader)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Voter)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Voter")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Voter)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.RaftProtocol)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("RaftProtocol")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.RaftProtocol)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *RaftServer) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *RaftServer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "ID": + if r.TryDecodeAsNil() { + x.ID = "" + } else { + yyv4 := &x.ID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Node": + if r.TryDecodeAsNil() { + x.Node = "" + } else { + yyv6 := &x.Node + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Address": + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv8 := &x.Address + yym9 := 
z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "Leader": + if r.TryDecodeAsNil() { + x.Leader = false + } else { + yyv10 := &x.Leader + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "Voter": + if r.TryDecodeAsNil() { + x.Voter = false + } else { + yyv12 := &x.Voter + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "RaftProtocol": + if r.TryDecodeAsNil() { + x.RaftProtocol = "" + } else { + yyv14 := &x.RaftProtocol + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *RaftServer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.ID = "" + } else { + yyv17 := &x.ID + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Node = "" + } else { + yyv19 := &x.Node + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv21 := &x.Address + yym22 := z.DecBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.DecExt(yyv21) { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Leader = false + } else { + yyv23 := &x.Leader + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Voter = false + } else { + yyv25 := &x.Voter + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if 
r.TryDecodeAsNil() { + x.RaftProtocol = "" + } else { + yyv27 := &x.RaftProtocol + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *RaftConfigurationResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.Servers == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePtrtoRaftServer(([]*RaftServer)(x.Servers), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Servers")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.Servers == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePtrtoRaftServer(([]*RaftServer)(x.Servers), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Index")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *RaftConfigurationResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *RaftConfigurationResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + 
break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Servers": + if r.TryDecodeAsNil() { + x.Servers = nil + } else { + yyv4 := &x.Servers + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePtrtoRaftServer((*[]*RaftServer)(yyv4), d) + } + } + case "Index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv6 := &x.Index + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *RaftConfigurationResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Servers = nil + } else { + yyv9 := &x.Servers + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePtrtoRaftServer((*[]*RaftServer)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv11 := &x.Index + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *RaftPeerByAddressRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.Address) { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Address)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Address")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.Address) { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Address)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else 
{ + r.EncodeString(codecSelferC_UTF8100, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *RaftPeerByAddressRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *RaftPeerByAddressRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Address": + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv4 := &x.Address + 
yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv6 := &x.Region + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv8 := &x.Namespace + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv10 := &x.AuthToken + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv12 := &x.Forwarded + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *RaftPeerByAddressRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv15 := &x.Address + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv17 := &x.Region + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv21 := &x.AuthToken + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv23 := &x.Forwarded + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = 
yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *RaftPeerByIDRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.ID) { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.ID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("ID")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.ID) { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.ID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *RaftPeerByIDRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *RaftPeerByIDRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "ID": + if r.TryDecodeAsNil() { + x.ID = "" + } else { + yyv4 := &x.ID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv6 := &x.Region + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv8 := &x.Namespace + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv10 := &x.AuthToken + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv12 := &x.Forwarded + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *RaftPeerByIDRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.ID = "" + } else { + yyv15 := &x.ID + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv17 := &x.Region + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv21 := &x.AuthToken + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv23 := &x.Forwarded + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *AutopilotSetConfigRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 7 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Datacenter)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Datacenter")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Datacenter)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yy7 := &x.Config + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Config")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yy9 := &x.Config + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + 
r.EncodeBool(bool(x.CAS)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("CAS")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.CAS)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *AutopilotSetConfigRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *AutopilotSetConfigRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ 
= yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Datacenter": + if r.TryDecodeAsNil() { + x.Datacenter = "" + } else { + yyv4 := &x.Datacenter + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Config": + if r.TryDecodeAsNil() { + x.Config = AutopilotConfig{} + } else { + yyv6 := &x.Config + yyv6.CodecDecodeSelf(d) + } + case "CAS": + if r.TryDecodeAsNil() { + x.CAS = false + } else { + yyv7 := &x.CAS + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*bool)(yyv7)) = r.DecodeBool() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv9 := &x.Region + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv11 := &x.Namespace + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv13 := &x.AuthToken + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv15 := &x.Forwarded + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *AutopilotSetConfigRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Datacenter = "" + } else { + yyv18 := &x.Datacenter + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Config = AutopilotConfig{} + } else { + yyv20 := &x.Config + yyv20.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.CAS = false + } else { + yyv21 := &x.CAS + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + 
x.Region = "" + } else { + yyv23 := &x.Region + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv25 := &x.Namespace + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv27 := &x.AuthToken + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv29 := &x.Forwarded + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj17-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *AutopilotConfig) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [9]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(9) + } else { + yynn2 = 9 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.CleanupDeadServers)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("CleanupDeadServers")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.CleanupDeadServers)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.ServerStabilizationTime) { + } else { + r.EncodeInt(int64(x.ServerStabilizationTime)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("ServerStabilizationTime")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.ServerStabilizationTime) { + } else { + r.EncodeInt(int64(x.ServerStabilizationTime)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContactThreshold) { + } else { + r.EncodeInt(int64(x.LastContactThreshold)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("LastContactThreshold")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContactThreshold) { + } else { + r.EncodeInt(int64(x.LastContactThreshold)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.MaxTrailingLogs)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("MaxTrailingLogs")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.MaxTrailingLogs)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.EnableRedundancyZones)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("EnableRedundancyZones")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.EnableRedundancyZones)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.DisableUpgradeMigration)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("DisableUpgradeMigration")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.DisableUpgradeMigration)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.EnableCustomUpgrades)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("EnableCustomUpgrades")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.EnableCustomUpgrades)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeUint(uint64(x.CreateIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("CreateIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeUint(uint64(x.CreateIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeUint(uint64(x.ModifyIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("ModifyIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym29 := z.EncBinary() + _ = yym29 + 
if false { + } else { + r.EncodeUint(uint64(x.ModifyIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *AutopilotConfig) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *AutopilotConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "CleanupDeadServers": + if r.TryDecodeAsNil() { + x.CleanupDeadServers = false + } else { + yyv4 := &x.CleanupDeadServers + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "ServerStabilizationTime": + if r.TryDecodeAsNil() { + x.ServerStabilizationTime = 0 + } else { + yyv6 := &x.ServerStabilizationTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } + } + case "LastContactThreshold": + if r.TryDecodeAsNil() { + x.LastContactThreshold = 0 + } else { + yyv8 := &x.LastContactThreshold + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + *((*int64)(yyv8)) = int64(r.DecodeInt(64)) + } + } + case "MaxTrailingLogs": + if r.TryDecodeAsNil() { + x.MaxTrailingLogs = 0 + } else { + yyv10 := &x.MaxTrailingLogs + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + case "EnableRedundancyZones": + if r.TryDecodeAsNil() { + x.EnableRedundancyZones = false + } else { + yyv12 := &x.EnableRedundancyZones + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "DisableUpgradeMigration": + if r.TryDecodeAsNil() { + x.DisableUpgradeMigration = false + } else { + yyv14 := &x.DisableUpgradeMigration + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "EnableCustomUpgrades": + if r.TryDecodeAsNil() { + x.EnableCustomUpgrades = false + } else { + yyv16 := &x.EnableCustomUpgrades + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(yyv16)) = r.DecodeBool() + } + } + case "CreateIndex": + if r.TryDecodeAsNil() { + x.CreateIndex = 0 + } else { + yyv18 := &x.CreateIndex + yym19 := z.DecBinary() + _ = yym19 + if 
false { + } else { + *((*uint64)(yyv18)) = uint64(r.DecodeUint(64)) + } + } + case "ModifyIndex": + if r.TryDecodeAsNil() { + x.ModifyIndex = 0 + } else { + yyv20 := &x.ModifyIndex + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*uint64)(yyv20)) = uint64(r.DecodeUint(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *AutopilotConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.CleanupDeadServers = false + } else { + yyv23 := &x.CleanupDeadServers + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.ServerStabilizationTime = 0 + } else { + yyv25 := &x.ServerStabilizationTime + yym26 := z.DecBinary() + _ = yym26 + if false { + } else if z.HasExtensions() && z.DecExt(yyv25) { + } else { + *((*int64)(yyv25)) = int64(r.DecodeInt(64)) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.LastContactThreshold = 0 + } else { + yyv27 := &x.LastContactThreshold + yym28 := z.DecBinary() + _ = yym28 + if false { + } else if z.HasExtensions() && z.DecExt(yyv27) { + } else { + *((*int64)(yyv27)) = int64(r.DecodeInt(64)) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.MaxTrailingLogs = 0 + } else { + yyv29 := &x.MaxTrailingLogs + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*uint64)(yyv29)) = uint64(r.DecodeUint(64)) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.EnableRedundancyZones = false + } else { + yyv31 := &x.EnableRedundancyZones + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.DisableUpgradeMigration = false + } else { + yyv33 := &x.DisableUpgradeMigration + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.EnableCustomUpgrades = false + } else { + yyv35 := &x.EnableCustomUpgrades + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*bool)(yyv35)) = r.DecodeBool() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.CreateIndex = 0 + } else { + yyv37 := &x.CreateIndex + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*uint64)(yyv37)) = uint64(r.DecodeUint(64)) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.ModifyIndex = 0 + } else { + yyv39 := &x.ModifyIndex + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*uint64)(yyv39)) = uint64(r.DecodeUint(64)) + } + } + for { + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj22-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *SchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yy4 := &x.PreemptionConfig + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("PreemptionConfig")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yy6 := &x.PreemptionConfig + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeUint(uint64(x.CreateIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("CreateIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeUint(uint64(x.CreateIndex)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeUint(uint64(x.ModifyIndex)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("ModifyIndex")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.ModifyIndex)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *SchedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *SchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "PreemptionConfig": + if r.TryDecodeAsNil() { + x.PreemptionConfig = PreemptionConfig{} + } else { + yyv4 := &x.PreemptionConfig + yyv4.CodecDecodeSelf(d) + } + case "CreateIndex": + if r.TryDecodeAsNil() { + x.CreateIndex = 0 + } else { + yyv5 := &x.CreateIndex + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*uint64)(yyv5)) = uint64(r.DecodeUint(64)) + } + } + case "ModifyIndex": + if r.TryDecodeAsNil() { + x.ModifyIndex = 0 + } else { + yyv7 := &x.ModifyIndex + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*uint64)(yyv7)) = uint64(r.DecodeUint(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *SchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.PreemptionConfig = PreemptionConfig{} + } else { + yyv10 := &x.PreemptionConfig + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.CreateIndex = 0 + } else { + yyv11 := &x.CreateIndex + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if 
r.TryDecodeAsNil() { + x.ModifyIndex = 0 + } else { + yyv13 := &x.ModifyIndex + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *SchedulerConfigurationResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if x.SchedulerConfig == nil { + r.EncodeNil() + } else { + x.SchedulerConfig.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("SchedulerConfig")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + if x.SchedulerConfig == nil { + r.EncodeNil() + } else { + x.SchedulerConfig.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Index")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("LastContact")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastContact) { + } else { + r.EncodeInt(int64(x.LastContact)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("KnownLeader")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.KnownLeader)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *SchedulerConfigurationResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *SchedulerConfigurationResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "SchedulerConfig": + if r.TryDecodeAsNil() { + if x.SchedulerConfig != nil { + x.SchedulerConfig = nil + } + } else { + if x.SchedulerConfig == nil { + x.SchedulerConfig = new(SchedulerConfiguration) + } + x.SchedulerConfig.CodecDecodeSelf(d) + } + case "Index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv5 := &x.Index + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*uint64)(yyv5)) = uint64(r.DecodeUint(64)) + } + } + case "LastContact": + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv7 := &x.LastContact + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + case "KnownLeader": + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv9 := &x.KnownLeader + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *SchedulerConfigurationResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + if x.SchedulerConfig != nil { + x.SchedulerConfig = nil + } + } else { + if x.SchedulerConfig == nil { + x.SchedulerConfig = new(SchedulerConfiguration) + } + x.SchedulerConfig.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv13 := &x.Index + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.LastContact = 0 + } else { + yyv15 := &x.LastContact + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.KnownLeader = false + } else { + yyv17 := &x.KnownLeader + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *SchedulerSetConfigurationResponse) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Updated)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Updated")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Updated)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Index")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *SchedulerSetConfigurationResponse) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} 
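
For readers unfamiliar with codecgen output: the CodecEncodeSelf/CodecDecodeSelf methods generated above are not called directly; go-codec invokes them automatically whenever a type implementing codec.Selfer is passed to an Encoder or Decoder. The sketch below (not part of the diff) illustrates a typical encode/decode round trip through these generated methods. The package name and the import path github.com/ugorji/go/codec are assumptions made for the example; it is meant to live alongside the generated code in the same package.

// roundtrip_sketch.go (illustrative only, not part of this commit)
package structs // assumption: same package as the generated codec code

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path for go-codec
)

// roundTripSketch encodes a response with msgpack and decodes it back.
// Because SchedulerSetConfigurationResponse implements codec.Selfer (via the
// generated CodecEncodeSelf/CodecDecodeSelf), the handle dispatches to the
// generated methods rather than reflection.
func roundTripSketch() error {
	in := &SchedulerSetConfigurationResponse{Updated: true, Index: 7}

	var h codec.MsgpackHandle
	var buf bytes.Buffer

	// Encode: calls in.CodecEncodeSelf under the hood.
	if err := codec.NewEncoder(&buf, &h).Encode(in); err != nil {
		return err
	}

	// Decode: calls out.CodecDecodeSelf under the hood.
	out := new(SchedulerSetConfigurationResponse)
	if err := codec.NewDecoder(&buf, &h).Decode(out); err != nil {
		return err
	}

	fmt.Println(out.Updated, out.Index) // expected: true 7
	return nil
}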
+ +func (x *SchedulerSetConfigurationResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Updated": + if r.TryDecodeAsNil() { + x.Updated = false + } else { + yyv4 := &x.Updated + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "Index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv6 := &x.Index + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *SchedulerSetConfigurationResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Updated = false + } else { + yyv9 := &x.Updated + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv11 := &x.Index + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *PreemptionConfig) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.SystemSchedulerEnabled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("SystemSchedulerEnabled")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 
:= z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.SystemSchedulerEnabled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *PreemptionConfig) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *PreemptionConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "SystemSchedulerEnabled": + if r.TryDecodeAsNil() { + x.SystemSchedulerEnabled = false + } else { + yyv4 := &x.SystemSchedulerEnabled + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *PreemptionConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.SystemSchedulerEnabled = false + } else { + yyv7 := &x.SystemSchedulerEnabled + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*bool)(yyv7)) = r.DecodeBool() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *SchedulerSetConfigRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 6 + for _, b := range 
yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yy4 := &x.Config + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Config")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yy6 := &x.Config + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeBool(bool(x.CAS)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("CAS")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.CAS)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Region)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Region")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Region)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Namespace)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Namespace)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.AuthToken)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("AuthToken")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.AuthToken)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Forwarded")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.Forwarded)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *SchedulerSetConfigRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } 
else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *SchedulerSetConfigRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Config": + if r.TryDecodeAsNil() { + x.Config = SchedulerConfiguration{} + } else { + yyv4 := &x.Config + yyv4.CodecDecodeSelf(d) + } + case "CAS": + if r.TryDecodeAsNil() { + x.CAS = false + } else { + yyv5 := &x.CAS + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*bool)(yyv5)) = r.DecodeBool() + } + } + case "Region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv7 := &x.Region + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "Namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv9 := &x.Namespace + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "AuthToken": + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv11 := &x.AuthToken + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "Forwarded": + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv13 := &x.Forwarded + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *SchedulerSetConfigRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Config = SchedulerConfiguration{} + } else { + yyv16 := &x.Config + yyv16.CodecDecodeSelf(d) + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.CAS = false + } else { + yyv17 := &x.CAS + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + yyv19 := 
&x.Region + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv21 := &x.Namespace + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.AuthToken = "" + } else { + yyv23 := &x.AuthToken + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Forwarded = false + } else { + yyv25 := &x.Forwarded + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *StreamingRpcHeader) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Method)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Method")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Method)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *StreamingRpcHeader) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + 
if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *StreamingRpcHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Method": + if r.TryDecodeAsNil() { + x.Method = "" + } else { + yyv4 := &x.Method + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *StreamingRpcHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Method = "" + } else { + yyv7 := &x.Method + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *StreamingRpcAck) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Error)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey100) + r.EncodeString(codecSelferC_UTF8100, string("Error")) + z.EncSendContainerState(codecSelfer_containerMapValue100) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(x.Error)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *StreamingRpcAck) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *StreamingRpcAck) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + case "Error": + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv4 := &x.Error + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *StreamingRpcAck) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv7 := &x.Error + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x *StreamingRpcRegistry) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [0]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(0) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd100) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd100) + } + } + } +} + +func (x *StreamingRpcRegistry) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap100 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd100) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray100 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd100) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr100) + } + } +} + +func (x *StreamingRpcRegistry) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey100) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue100) + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x *StreamingRpcRegistry) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem100) + z.DecStructFieldNotFound(yyj4-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) encBitmap(v Bitmap, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeStringBytes(codecSelferC_RAW100, []byte(v)) +} + +func (x codecSelfer100) decBitmap(v *Bitmap, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + *v = r.DecodeBytes(*((*[]byte)(v)), false, false) +} + +func (x codecSelfer100) encSlicePtrtoFieldDiff(v []*FieldDiff, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decSlicePtrtoFieldDiff(v *[]*FieldDiff, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*FieldDiff{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*FieldDiff, yyrl1) + } + } else { + yyv1 = make([]*FieldDiff, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + 
yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = FieldDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(FieldDiff) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = FieldDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(FieldDiff) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *FieldDiff + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = FieldDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(FieldDiff) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*FieldDiff{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encSlicePtrtoObjectDiff(v []*ObjectDiff, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decSlicePtrtoObjectDiff(v *[]*ObjectDiff, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*ObjectDiff{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*ObjectDiff, yyrl1) + } + } else { + yyv1 = make([]*ObjectDiff, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = ObjectDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(ObjectDiff) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = ObjectDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(ObjectDiff) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *ObjectDiff + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = ObjectDiff{} + } + } 
else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(ObjectDiff) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*ObjectDiff{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encSlicePtrtoTaskGroupDiff(v []*TaskGroupDiff, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decSlicePtrtoTaskGroupDiff(v *[]*TaskGroupDiff, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*TaskGroupDiff{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*TaskGroupDiff, yyrl1) + } + } else { + yyv1 = make([]*TaskGroupDiff, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskGroupDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskGroupDiff) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskGroupDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskGroupDiff) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *TaskGroupDiff + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskGroupDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskGroupDiff) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*TaskGroupDiff{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encSlicePtrtoTaskDiff(v []*TaskDiff, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decSlicePtrtoTaskDiff(v *[]*TaskDiff, d *codec1978.Decoder) { + 
var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*TaskDiff{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*TaskDiff, yyrl1) + } + } else { + yyv1 = make([]*TaskDiff, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskDiff) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskDiff) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *TaskDiff + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskDiff) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*TaskDiff{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encTaskGroupDiffs(v TaskGroupDiffs, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decTaskGroupDiffs(v *TaskGroupDiffs, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*TaskGroupDiff{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*TaskGroupDiff, yyrl1) + } + } else { + yyv1 = make([]*TaskGroupDiff, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + 
if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskGroupDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskGroupDiff) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskGroupDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskGroupDiff) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *TaskGroupDiff + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskGroupDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskGroupDiff) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*TaskGroupDiff{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encTaskDiffs(v TaskDiffs, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decTaskDiffs(v *TaskDiffs, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*TaskDiff{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*TaskDiff, yyrl1) + } + } else { + yyv1 = make([]*TaskDiff, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskDiff) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskDiff) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *TaskDiff + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = TaskDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(TaskDiff) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) 
+ } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*TaskDiff{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encObjectDiffs(v ObjectDiffs, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decObjectDiffs(v *ObjectDiffs, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*ObjectDiff{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*ObjectDiff, yyrl1) + } + } else { + yyv1 = make([]*ObjectDiff, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = ObjectDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(ObjectDiff) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = ObjectDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(ObjectDiff) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *ObjectDiff + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = ObjectDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(ObjectDiff) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*ObjectDiff{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encFieldDiffs(v FieldDiffs, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decFieldDiffs(v *FieldDiffs, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + 
yyv1 = []*FieldDiff{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*FieldDiff, yyrl1) + } + } else { + yyv1 = make([]*FieldDiff, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = FieldDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(FieldDiff) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = FieldDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(FieldDiff) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *FieldDiff + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = FieldDiff{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(FieldDiff) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*FieldDiff{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encSlicePtrtoNetworkResource(v []*NetworkResource, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yym2 := z.EncBinary() + _ = yym2 + if false { + } else if z.HasExtensions() && z.EncExt(yyv1) { + } else { + z.EncFallback(yyv1) + } + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decSlicePtrtoNetworkResource(v *[]*NetworkResource, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*NetworkResource{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*NetworkResource, yyrl1) + } + } else { + yyv1 = make([]*NetworkResource, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil 
{ + *yyv1[yyj1] = NetworkResource{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(NetworkResource) + } + yyw2 := yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyw2) { + } else { + z.DecFallback(yyw2, false) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = NetworkResource{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(NetworkResource) + } + yyw4 := yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyw4) { + } else { + z.DecFallback(yyw4, false) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *NetworkResource + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = NetworkResource{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(NetworkResource) + } + yyw6 := yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyw6) { + } else { + z.DecFallback(yyw6, false) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*NetworkResource{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer100) encMapstringBitmap(v map[string]Bitmap, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey100) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF8100, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x codecSelfer100) decMapstringBitmap(v *map[string]Bitmap, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]Bitmap, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 Bitmap + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey100) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue100) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey100) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + 
z.DecSendContainerState(codecSelfer_containerMapValue100) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd100) +} + +func (x codecSelfer100) encSlicePtrtoRaftServer(v []*RaftServer, e *codec1978.Encoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem100) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd100) +} + +func (x codecSelfer100) decSlicePtrtoRaftServer(v *[]*RaftServer, d *codec1978.Decoder) { + var h codecSelfer100 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*RaftServer{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*RaftServer, yyrl1) + } + } else { + yyv1 = make([]*RaftServer, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = RaftServer{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(RaftServer) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = RaftServer{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(RaftServer) + } + yyw3 := yyv1[yyj1] + yyw3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *RaftServer + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = RaftServer{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(RaftServer) + } + yyw4 := yyv1[yyj1] + yyw4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*RaftServer{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go new file mode 100644 index 0000000..88b2db3 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go @@ -0,0 +1,8949 @@ +package structs + +import ( + "bytes" + "container/heap" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + 
"strings" + "time" + + "github.com/gorhill/cronexpr" + "github.com/hashicorp/consul/api" + hcodec "github.com/hashicorp/go-msgpack/codec" + multierror "github.com/hashicorp/go-multierror" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/args" + "github.com/hashicorp/nomad/helper/uuid" + "github.com/hashicorp/nomad/lib/kheap" + psstructs "github.com/hashicorp/nomad/plugins/shared/structs" + "github.com/mitchellh/copystructure" + "github.com/ugorji/go/codec" + "golang.org/x/crypto/blake2b" +) + +var ( + // validPolicyName is used to validate a policy name + validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$") + + // b32 is a lowercase base32 encoding for use in URL friendly service hashes + b32 = base32.NewEncoding(strings.ToLower("abcdefghijklmnopqrstuvwxyz234567")) +) + +type MessageType uint8 + +const ( + NodeRegisterRequestType MessageType = iota + NodeDeregisterRequestType + NodeUpdateStatusRequestType + NodeUpdateDrainRequestType + JobRegisterRequestType + JobDeregisterRequestType + EvalUpdateRequestType + EvalDeleteRequestType + AllocUpdateRequestType + AllocClientUpdateRequestType + ReconcileJobSummariesRequestType + VaultAccessorRegisterRequestType + VaultAccessorDeregisterRequestType + ApplyPlanResultsRequestType + DeploymentStatusUpdateRequestType + DeploymentPromoteRequestType + DeploymentAllocHealthRequestType + DeploymentDeleteRequestType + JobStabilityRequestType + ACLPolicyUpsertRequestType + ACLPolicyDeleteRequestType + ACLTokenUpsertRequestType + ACLTokenDeleteRequestType + ACLTokenBootstrapRequestType + AutopilotRequestType + UpsertNodeEventsType + JobBatchDeregisterRequestType + AllocUpdateDesiredTransitionRequestType + NodeUpdateEligibilityRequestType + BatchNodeUpdateDrainRequestType + SchedulerConfigRequestType +) + +const ( + // IgnoreUnknownTypeFlag is set along with a MessageType + // to indicate that the message type can be safely ignored + // if it is not recognized. This is for future proofing, so + // that new commands can be added in a way that won't cause + // old servers to crash when the FSM attempts to process them. + IgnoreUnknownTypeFlag MessageType = 128 + + // ApiMajorVersion is returned as part of the Status.Version request. + // It should be incremented anytime the APIs are changed in a way + // that would break clients for sane client versioning. + ApiMajorVersion = 1 + + // ApiMinorVersion is returned as part of the Status.Version request. + // It should be incremented anytime the APIs are changed to allow + // for sane client versioning. Minor changes should be compatible + // within the major version. + ApiMinorVersion = 1 + + ProtocolVersion = "protocol" + APIMajorVersion = "api.major" + APIMinorVersion = "api.minor" + + GetterModeAny = "any" + GetterModeFile = "file" + GetterModeDir = "dir" + + // maxPolicyDescriptionLength limits a policy description length + maxPolicyDescriptionLength = 256 + + // maxTokenNameLength limits a ACL token name length + maxTokenNameLength = 256 + + // ACLClientToken and ACLManagementToken are the only types of tokens + ACLClientToken = "client" + ACLManagementToken = "management" + + // DefaultNamespace is the default namespace. + DefaultNamespace = "default" + DefaultNamespaceDescription = "Default shared namespace" + + // JitterFraction is a the limit to the amount of jitter we apply + // to a user specified MaxQueryTime. We divide the specified time by + // the fraction. 
So 16 == 6.25% limit of jitter. This jitter is also + // applied to RPCHoldTimeout. + JitterFraction = 16 + + // MaxRetainedNodeEvents is the maximum number of node events that will be + // retained for a single node + MaxRetainedNodeEvents = 10 + + // MaxRetainedNodeScores is the number of top scoring nodes for which we + // retain scoring metadata + MaxRetainedNodeScores = 5 + + // Normalized scorer name + NormScorerName = "normalized-score" +) + +// Context defines the scope in which a search for a Nomad object operates, and +// is also used to query the matching index value for this context +type Context string + +const ( + Allocs Context = "allocs" + Deployments Context = "deployment" + Evals Context = "evals" + Jobs Context = "jobs" + Nodes Context = "nodes" + Namespaces Context = "namespaces" + Quotas Context = "quotas" + All Context = "all" +) + +// NamespacedID is a tuple of an ID and a namespace +type NamespacedID struct { + ID string + Namespace string +} + +// NewNamespacedID returns a new namespaced ID given the ID and namespace +func NewNamespacedID(id, ns string) NamespacedID { + return NamespacedID{ + ID: id, + Namespace: ns, + } +} + +func (n NamespacedID) String() string { + return fmt.Sprintf("<ns: %q, id: %q>", n.Namespace, n.ID) +} + +// RPCInfo is used to describe common information about a query +type RPCInfo interface { + RequestRegion() string + IsRead() bool + AllowStaleRead() bool + IsForwarded() bool + SetForwarded() +} + +// InternalRpcInfo allows adding internal RPC metadata to an RPC. This struct +// should NOT be replicated in the API package as it is internal only. +type InternalRpcInfo struct { + // Forwarded marks whether the RPC has been forwarded. + Forwarded bool +} + +// IsForwarded returns whether the RPC is forwarded from another server. +func (i *InternalRpcInfo) IsForwarded() bool { + return i.Forwarded +} + +// SetForwarded marks that the RPC is being forwarded from another server. +func (i *InternalRpcInfo) SetForwarded() { + i.Forwarded = true +} + +// QueryOptions is used to specify various flags for read queries +type QueryOptions struct { + // The target region for this query + Region string + + // Namespace is the target namespace for the query. + Namespace string + + // If set, wait until query exceeds given index. Must be provided + // with MaxQueryTime. + MinQueryIndex uint64 + + // Provided with MinQueryIndex to wait for change. + MaxQueryTime time.Duration + + // If set, any follower can service the request. Results + // may be arbitrarily stale. + AllowStale bool + + // If set, used as prefix for resource list searches + Prefix string + + // AuthToken is the secret portion of the ACL token used for the request + AuthToken string + + InternalRpcInfo +} + +func (q QueryOptions) RequestRegion() string { + return q.Region +} + +func (q QueryOptions) RequestNamespace() string { + if q.Namespace == "" { + return DefaultNamespace + } + return q.Namespace +} + +// QueryOptions only applies to reads, so always true +func (q QueryOptions) IsRead() bool { + return true +} + +func (q QueryOptions) AllowStaleRead() bool { + return q.AllowStale +} + +type WriteRequest struct { + // The target region for this write + Region string + + // Namespace is the target namespace for the write.
+ Namespace string + + // AuthToken is secret portion of the ACL token used for the request + AuthToken string + + InternalRpcInfo +} + +func (w WriteRequest) RequestRegion() string { + // The target region for this request + return w.Region +} + +func (w WriteRequest) RequestNamespace() string { + if w.Namespace == "" { + return DefaultNamespace + } + return w.Namespace +} + +// WriteRequest only applies to writes, always false +func (w WriteRequest) IsRead() bool { + return false +} + +func (w WriteRequest) AllowStaleRead() bool { + return false +} + +// QueryMeta allows a query response to include potentially +// useful metadata about a query +type QueryMeta struct { + // This is the index associated with the read + Index uint64 + + // If AllowStale is used, this is time elapsed since + // last contact between the follower and leader. This + // can be used to gauge staleness. + LastContact time.Duration + + // Used to indicate if there is a known leader node + KnownLeader bool +} + +// WriteMeta allows a write response to include potentially +// useful metadata about the write +type WriteMeta struct { + // This is the index associated with the write + Index uint64 +} + +// NodeRegisterRequest is used for Node.Register endpoint +// to register a node as being a schedulable entity. +type NodeRegisterRequest struct { + Node *Node + NodeEvent *NodeEvent + WriteRequest +} + +// NodeDeregisterRequest is used for Node.Deregister endpoint +// to deregister a node as being a schedulable entity. +type NodeDeregisterRequest struct { + NodeID string + WriteRequest +} + +// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server +// information used in RPC server lists. +type NodeServerInfo struct { + // RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to + // be contacted at for RPCs. + RPCAdvertiseAddr string + + // RpcMajorVersion is the major version number the Nomad Server + // supports + RPCMajorVersion int32 + + // RpcMinorVersion is the minor version number the Nomad Server + // supports + RPCMinorVersion int32 + + // Datacenter is the datacenter that a Nomad server belongs to + Datacenter string +} + +// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint +// to update the status of a node. +type NodeUpdateStatusRequest struct { + NodeID string + Status string + NodeEvent *NodeEvent + WriteRequest +} + +// NodeUpdateDrainRequest is used for updating the drain strategy +type NodeUpdateDrainRequest struct { + NodeID string + DrainStrategy *DrainStrategy + + // COMPAT Remove in version 0.10 + // As part of Nomad 0.8 we have deprecated the drain boolean in favor of a + // drain strategy but we need to handle the upgrade path where the Raft log + // contains drain updates with just the drain boolean being manipulated. + Drain bool + + // MarkEligible marks the node as eligible if removing the drain strategy. 
+ MarkEligible bool + + // NodeEvent is the event added to the node + NodeEvent *NodeEvent + + WriteRequest +} + +// BatchNodeUpdateDrainRequest is used for updating the drain strategy for a +// batch of nodes +type BatchNodeUpdateDrainRequest struct { + // Updates is a mapping of nodes to their updated drain strategy + Updates map[string]*DrainUpdate + + // NodeEvents is a mapping of the node to the event to add to the node + NodeEvents map[string]*NodeEvent + + WriteRequest +} + +// DrainUpdate is used to update the drain of a node +type DrainUpdate struct { + // DrainStrategy is the new strategy for the node + DrainStrategy *DrainStrategy + + // MarkEligible marks the node as eligible if removing the drain strategy. + MarkEligible bool +} + +// NodeUpdateEligibilityRequest is used for updating the scheduling eligibility +type NodeUpdateEligibilityRequest struct { + NodeID string + Eligibility string + + // NodeEvent is the event added to the node + NodeEvent *NodeEvent + + WriteRequest +} + +// NodeEvaluateRequest is used to re-evaluate the node +type NodeEvaluateRequest struct { + NodeID string + WriteRequest +} + +// NodeSpecificRequest is used when we just need to specify a target node +type NodeSpecificRequest struct { + NodeID string + SecretID string + QueryOptions +} + +// SearchResponse is used to return matches and information about whether +// the match list is truncated specific to each type of context. +type SearchResponse struct { + // Map of context types to ids which match a specified prefix + Matches map[Context][]string + + // Truncations indicates whether the matches for a particular context have + // been truncated + Truncations map[Context]bool + + QueryMeta +} + +// SearchRequest is used to parameterize a request, and returns a +// list of matches made up of jobs, allocations, evaluations, and/or nodes, +// along with whether or not the information returned is truncated. +type SearchRequest struct { + // Prefix is what ids are matched to. I.e, if the given prefix were + // "a", potential matches might be "abcd" or "aabb" + Prefix string + + // Context is the type that can be matched against. A context can be a job, + // node, evaluation, allocation, or empty (indicated every context should be + // matched) + Context Context + + QueryOptions +} + +// JobRegisterRequest is used for Job.Register endpoint +// to register a job as being a schedulable entity. +type JobRegisterRequest struct { + Job *Job + + // If EnforceIndex is set then the job will only be registered if the passed + // JobModifyIndex matches the current Jobs index. If the index is zero, the + // register only occurs if the job is new. + EnforceIndex bool + JobModifyIndex uint64 + + // PolicyOverride is set when the user is attempting to override any policies + PolicyOverride bool + + WriteRequest +} + +// JobDeregisterRequest is used for Job.Deregister endpoint +// to deregister a job as being a schedulable entity. +type JobDeregisterRequest struct { + JobID string + + // Purge controls whether the deregister purges the job from the system or + // whether the job is just marked as stopped and will be removed by the + // garbage collector + Purge bool + + WriteRequest +} + +// JobBatchDeregisterRequest is used to batch deregister jobs and upsert +// evaluations. +type JobBatchDeregisterRequest struct { + // Jobs is the set of jobs to deregister + Jobs map[NamespacedID]*JobDeregisterOptions + + // Evals is the set of evaluations to create. 
+ Evals []*Evaluation + + WriteRequest +} + +// JobDeregisterOptions configures how a job is deregistered. +type JobDeregisterOptions struct { + // Purge controls whether the deregister purges the job from the system or + // whether the job is just marked as stopped and will be removed by the + // garbage collector + Purge bool +} + +// JobEvaluateRequest is used when we just need to re-evaluate a target job +type JobEvaluateRequest struct { + JobID string + EvalOptions EvalOptions + WriteRequest +} + +// EvalOptions is used to encapsulate options when forcing a job evaluation +type EvalOptions struct { + ForceReschedule bool +} + +// JobSpecificRequest is used when we just need to specify a target job +type JobSpecificRequest struct { + JobID string + AllAllocs bool + QueryOptions +} + +// JobListRequest is used to parameterize a list request +type JobListRequest struct { + QueryOptions +} + +// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run +// evaluation of the Job. +type JobPlanRequest struct { + Job *Job + Diff bool // Toggles an annotated diff + // PolicyOverride is set when the user is attempting to override any policies + PolicyOverride bool + WriteRequest +} + +// JobSummaryRequest is used when we just need to get a specific job summary +type JobSummaryRequest struct { + JobID string + QueryOptions +} + +// JobDispatchRequest is used to dispatch a job based on a parameterized job +type JobDispatchRequest struct { + JobID string + Payload []byte + Meta map[string]string + WriteRequest +} + +// JobValidateRequest is used to validate a job +type JobValidateRequest struct { + Job *Job + WriteRequest +} + +// JobRevertRequest is used to revert a job to a prior version. +type JobRevertRequest struct { + // JobID is the ID of the job being reverted + JobID string + + // JobVersion the version to revert to. + JobVersion uint64 + + // EnforcePriorVersion if set will enforce that the job is at the given + // version before reverting. + EnforcePriorVersion *uint64 + + WriteRequest +} + +// JobStabilityRequest is used to marked a job as stable. +type JobStabilityRequest struct { + // Job to set the stability on + JobID string + JobVersion uint64 + + // Set the stability + Stable bool + WriteRequest +} + +// JobStabilityResponse is the response when marking a job as stable. +type JobStabilityResponse struct { + WriteMeta +} + +// NodeListRequest is used to parameterize a list request +type NodeListRequest struct { + QueryOptions +} + +// EvalUpdateRequest is used for upserting evaluations. +type EvalUpdateRequest struct { + Evals []*Evaluation + EvalToken string + WriteRequest +} + +// EvalDeleteRequest is used for deleting an evaluation. 
+type EvalDeleteRequest struct { + Evals []string + Allocs []string + WriteRequest +} + +// EvalSpecificRequest is used when we just need to specify a target evaluation +type EvalSpecificRequest struct { + EvalID string + QueryOptions +} + +// EvalAckRequest is used to Ack/Nack a specific evaluation +type EvalAckRequest struct { + EvalID string + Token string + WriteRequest +} + +// EvalDequeueRequest is used when we want to dequeue an evaluation +type EvalDequeueRequest struct { + Schedulers []string + Timeout time.Duration + SchedulerVersion uint16 + WriteRequest +} + +// EvalListRequest is used to list the evaluations +type EvalListRequest struct { + QueryOptions +} + +// PlanRequest is used to submit an allocation plan to the leader +type PlanRequest struct { + Plan *Plan + WriteRequest +} + +// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction +// committing the result of a plan. +type ApplyPlanResultsRequest struct { + // AllocUpdateRequest holds the allocation updates to be made by the + // scheduler. + AllocUpdateRequest + + // Deployment is the deployment created or updated as a result of a + // scheduling event. + Deployment *Deployment + + // DeploymentUpdates is a set of status updates to apply to the given + // deployments. This allows the scheduler to cancel any unneeded deployment + // because the job is stopped or the update block is removed. + DeploymentUpdates []*DeploymentStatusUpdate + + // EvalID is the eval ID of the plan being applied. The modify index of the + // evaluation is updated as part of applying the plan to ensure that subsequent + // scheduling events for the same job will wait for the index that last produced + // state changes. This is necessary for blocked evaluations since they can be + // processed many times, potentially making state updates, without the state of + // the evaluation itself being updated. + EvalID string + + // NodePreemptions is a slice of allocations from other lower priority jobs + // that are preempted. Preempted allocations are marked as evicted. + NodePreemptions []*Allocation + + // PreemptionEvals is a slice of follow up evals for jobs whose allocations + // have been preempted to place allocs in this plan + PreemptionEvals []*Evaluation +} + +// AllocUpdateRequest is used to submit changes to allocations, either +// to cause evictions or to assign new allocations. Both can be done +// within a single transaction +type AllocUpdateRequest struct { + // Alloc is the list of new allocations to assign + Alloc []*Allocation + + // Evals is the list of new evaluations to create + // Evals are valid only when used in the Raft RPC + Evals []*Evaluation + + // Job is the shared parent job of the allocations. + // It is pulled out since it is common to reduce payload size. + Job *Job + + WriteRequest +} + +// AllocUpdateDesiredTransitionRequest is used to submit changes to allocations +// desired transition state. 
+type AllocUpdateDesiredTransitionRequest struct { + // Allocs is the mapping of allocation ids to their desired state + // transition + Allocs map[string]*DesiredTransition + + // Evals is the set of evaluations to create + Evals []*Evaluation + + WriteRequest +} + +// AllocListRequest is used to request a list of allocations +type AllocListRequest struct { + QueryOptions +} + +// AllocSpecificRequest is used to query a specific allocation +type AllocSpecificRequest struct { + AllocID string + QueryOptions +} + +// AllocsGetRequest is used to query a set of allocations +type AllocsGetRequest struct { + AllocIDs []string + QueryOptions +} + +// PeriodicForceRequest is used to force a specific periodic job. +type PeriodicForceRequest struct { + JobID string + WriteRequest +} + +// ServerMembersResponse has the list of servers in a cluster +type ServerMembersResponse struct { + ServerName string + ServerRegion string + ServerDC string + Members []*ServerMember +} + +// ServerMember holds information about a Nomad server agent in a cluster +type ServerMember struct { + Name string + Addr net.IP + Port uint16 + Tags map[string]string + Status string + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the +// following tasks in the given allocation +type DeriveVaultTokenRequest struct { + NodeID string + SecretID string + AllocID string + Tasks []string + QueryOptions +} + +// VaultAccessorsRequest is used to operate on a set of Vault accessors +type VaultAccessorsRequest struct { + Accessors []*VaultAccessor +} + +// VaultAccessor is a reference to a created Vault token on behalf of +// an allocation's task. +type VaultAccessor struct { + AllocID string + Task string + NodeID string + Accessor string + CreationTTL int + + // Raft Indexes + CreateIndex uint64 +} + +// DeriveVaultTokenResponse returns the wrapped tokens for each requested task +type DeriveVaultTokenResponse struct { + // Tasks is a mapping between the task name and the wrapped token + Tasks map[string]string + + // Error stores any error that occurred. Errors are stored here so we can + // communicate whether it is retriable + Error *RecoverableError + + QueryMeta +} + +// GenericRequest is used to request where no +// specific information is needed. +type GenericRequest struct { + QueryOptions +} + +// DeploymentListRequest is used to list the deployments +type DeploymentListRequest struct { + QueryOptions +} + +// DeploymentDeleteRequest is used for deleting deployments. +type DeploymentDeleteRequest struct { + Deployments []string + WriteRequest +} + +// DeploymentStatusUpdateRequest is used to update the status of a deployment as +// well as optionally creating an evaluation atomically. +type DeploymentStatusUpdateRequest struct { + // Eval, if set, is used to create an evaluation at the same time as + // updating the status of a deployment. + Eval *Evaluation + + // DeploymentUpdate is a status update to apply to the given + // deployment. + DeploymentUpdate *DeploymentStatusUpdate + + // Job is used to optionally upsert a job. This is used when setting the + // allocation health results in a deployment failure and the deployment + // auto-reverts to the latest stable job. + Job *Job +} + +// DeploymentAllocHealthRequest is used to set the health of a set of +// allocations as part of a deployment. 
+type DeploymentAllocHealthRequest struct { + DeploymentID string + + // Marks these allocations as healthy, allow further allocations + // to be rolled. + HealthyAllocationIDs []string + + // Any unhealthy allocations fail the deployment + UnhealthyAllocationIDs []string + + WriteRequest +} + +// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft +type ApplyDeploymentAllocHealthRequest struct { + DeploymentAllocHealthRequest + + // Timestamp is the timestamp to use when setting the allocations health. + Timestamp time.Time + + // An optional field to update the status of a deployment + DeploymentUpdate *DeploymentStatusUpdate + + // Job is used to optionally upsert a job. This is used when setting the + // allocation health results in a deployment failure and the deployment + // auto-reverts to the latest stable job. + Job *Job + + // An optional evaluation to create after promoting the canaries + Eval *Evaluation +} + +// DeploymentPromoteRequest is used to promote task groups in a deployment +type DeploymentPromoteRequest struct { + DeploymentID string + + // All is to promote all task groups + All bool + + // Groups is used to set the promotion status per task group + Groups []string + + WriteRequest +} + +// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft +type ApplyDeploymentPromoteRequest struct { + DeploymentPromoteRequest + + // An optional evaluation to create after promoting the canaries + Eval *Evaluation +} + +// DeploymentPauseRequest is used to pause a deployment +type DeploymentPauseRequest struct { + DeploymentID string + + // Pause sets the pause status + Pause bool + + WriteRequest +} + +// DeploymentSpecificRequest is used to make a request specific to a particular +// deployment +type DeploymentSpecificRequest struct { + DeploymentID string + QueryOptions +} + +// DeploymentFailRequest is used to fail a particular deployment +type DeploymentFailRequest struct { + DeploymentID string + WriteRequest +} + +// SingleDeploymentResponse is used to respond with a single deployment +type SingleDeploymentResponse struct { + Deployment *Deployment + QueryMeta +} + +// GenericResponse is used to respond to a request where no +// specific response information is needed. +type GenericResponse struct { + WriteMeta +} + +// VersionResponse is used for the Status.Version response +type VersionResponse struct { + Build string + Versions map[string]int + QueryMeta +} + +// JobRegisterResponse is used to respond to a job registration +type JobRegisterResponse struct { + EvalID string + EvalCreateIndex uint64 + JobModifyIndex uint64 + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. 
+ Warnings string + + QueryMeta +} + +// JobDeregisterResponse is used to respond to a job deregistration +type JobDeregisterResponse struct { + EvalID string + EvalCreateIndex uint64 + JobModifyIndex uint64 + QueryMeta +} + +// JobBatchDeregisterResponse is used to respond to a batch job deregistration +type JobBatchDeregisterResponse struct { + // JobEvals maps the job to its created evaluation + JobEvals map[NamespacedID]string + QueryMeta +} + +// JobValidateResponse is the response from validate request +type JobValidateResponse struct { + // DriverConfigValidated indicates whether the agent validated the driver + // config + DriverConfigValidated bool + + // ValidationErrors is a list of validation errors + ValidationErrors []string + + // Error is a string version of any error that may have occurred + Error string + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string +} + +// NodeUpdateResponse is used to respond to a node update +type NodeUpdateResponse struct { + HeartbeatTTL time.Duration + EvalIDs []string + EvalCreateIndex uint64 + NodeModifyIndex uint64 + + // LeaderRPCAddr is the RPC address of the current Raft Leader. If + // empty, the current Nomad Server is in the minority of a partition. + LeaderRPCAddr string + + // NumNodes is the number of Nomad nodes attached to this quorum of + // Nomad Servers at the time of the response. This value can + // fluctuate based on the health of the cluster between heartbeats. + NumNodes int32 + + // Servers is the full list of known Nomad servers in the local + // region. + Servers []*NodeServerInfo + + QueryMeta +} + +// NodeDrainUpdateResponse is used to respond to a node drain update +type NodeDrainUpdateResponse struct { + NodeModifyIndex uint64 + EvalIDs []string + EvalCreateIndex uint64 + WriteMeta +} + +// NodeEligibilityUpdateResponse is used to respond to a node eligibility update +type NodeEligibilityUpdateResponse struct { + NodeModifyIndex uint64 + EvalIDs []string + EvalCreateIndex uint64 + WriteMeta +} + +// NodeAllocsResponse is used to return allocs for a single node +type NodeAllocsResponse struct { + Allocs []*Allocation + QueryMeta +} + +// NodeClientAllocsResponse is used to return allocs meta data for a single node +type NodeClientAllocsResponse struct { + Allocs map[string]uint64 + + // MigrateTokens are used when ACLs are enabled to allow cross node, + // authenticated access to sticky volumes + MigrateTokens map[string]string + + QueryMeta +} + +// SingleNodeResponse is used to return a single node +type SingleNodeResponse struct { + Node *Node + QueryMeta +} + +// NodeListResponse is used for a list request +type NodeListResponse struct { + Nodes []*NodeListStub + QueryMeta +} + +// SingleJobResponse is used to return a single job +type SingleJobResponse struct { + Job *Job + QueryMeta +} + +// JobSummaryResponse is used to return a single job summary +type JobSummaryResponse struct { + JobSummary *JobSummary + QueryMeta +} + +type JobDispatchResponse struct { + DispatchedJobID string + EvalID string + EvalCreateIndex uint64 + JobCreateIndex uint64 + WriteMeta +} + +// JobListResponse is used for a list request +type JobListResponse struct { + Jobs []*JobListStub + QueryMeta +} + +// JobVersionsRequest is used to get a jobs versions +type JobVersionsRequest struct { + JobID string + Diffs bool + QueryOptions +} + +// JobVersionsResponse is used for a job get versions request +type JobVersionsResponse struct { + Versions []*Job + Diffs 
[]*JobDiff + QueryMeta +} + +// JobPlanResponse is used to respond to a job plan request +type JobPlanResponse struct { + // Annotations stores annotations explaining decisions the scheduler made. + Annotations *PlanAnnotations + + // FailedTGAllocs is the placement failures per task group. + FailedTGAllocs map[string]*AllocMetric + + // JobModifyIndex is the modification index of the job. The value can be + // used when running `nomad run` to ensure that the Job wasn’t modified + // since the last plan. If the job is being created, the value is zero. + JobModifyIndex uint64 + + // CreatedEvals is the set of evaluations created by the scheduler. The + // reasons for this can be rolling-updates or blocked evals. + CreatedEvals []*Evaluation + + // Diff contains the diff of the job and annotations on whether the change + // causes an in-place update or create/destroy + Diff *JobDiff + + // NextPeriodicLaunch is the time duration till the job would be launched if + // submitted. + NextPeriodicLaunch time.Time + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string + + WriteMeta +} + +// SingleAllocResponse is used to return a single allocation +type SingleAllocResponse struct { + Alloc *Allocation + QueryMeta +} + +// AllocsGetResponse is used to return a set of allocations +type AllocsGetResponse struct { + Allocs []*Allocation + QueryMeta +} + +// JobAllocationsResponse is used to return the allocations for a job +type JobAllocationsResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// JobEvaluationsResponse is used to return the evaluations for a job +type JobEvaluationsResponse struct { + Evaluations []*Evaluation + QueryMeta +} + +// SingleEvalResponse is used to return a single evaluation +type SingleEvalResponse struct { + Eval *Evaluation + QueryMeta +} + +// EvalDequeueResponse is used to return from a dequeue +type EvalDequeueResponse struct { + Eval *Evaluation + Token string + + // WaitIndex is the Raft index the worker should wait until invoking the + // scheduler. + WaitIndex uint64 + + QueryMeta +} + +// GetWaitIndex is used to retrieve the Raft index in which state should be at +// or beyond before invoking the scheduler. +func (e *EvalDequeueResponse) GetWaitIndex() uint64 { + // Prefer the wait index sent. This will be populated on all responses from + // 0.7.0 and above + if e.WaitIndex != 0 { + return e.WaitIndex + } else if e.Eval != nil { + return e.Eval.ModifyIndex + } + + // This should never happen + return 1 +} + +// PlanResponse is used to return from a PlanRequest +type PlanResponse struct { + Result *PlanResult + WriteMeta +} + +// AllocListResponse is used for a list request +type AllocListResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// DeploymentListResponse is used for a list request +type DeploymentListResponse struct { + Deployments []*Deployment + QueryMeta +} + +// EvalListResponse is used for a list request +type EvalListResponse struct { + Evaluations []*Evaluation + QueryMeta +} + +// EvalAllocationsResponse is used to return the allocations for an evaluation +type EvalAllocationsResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// PeriodicForceResponse is used to respond to a periodic job force launch +type PeriodicForceResponse struct { + EvalID string + EvalCreateIndex uint64 + WriteMeta +} + +// DeploymentUpdateResponse is used to respond to a deployment change. 
The +// response will include the modify index of the deployment as well as details +// of any triggered evaluation. +type DeploymentUpdateResponse struct { + EvalID string + EvalCreateIndex uint64 + DeploymentModifyIndex uint64 + + // RevertedJobVersion is the version the job was reverted to. If unset, the + // job wasn't reverted + RevertedJobVersion *uint64 + + WriteMeta +} + +// NodeConnQueryResponse is used to respond to a query of whether a server has +// a connection to a specific Node +type NodeConnQueryResponse struct { + // Connected indicates whether a connection to the Client exists + Connected bool + + // Established marks the time at which the connection was established + Established time.Time + + QueryMeta +} + +// EmitNodeEventsRequest is a request to update the node events source +// with a new client-side event +type EmitNodeEventsRequest struct { + // NodeEvents are a map where the key is a node id, and value is a list of + // events for that node + NodeEvents map[string][]*NodeEvent + + WriteRequest +} + +// EmitNodeEventsResponse is a response to the client about the status of +// the node event source update. +type EmitNodeEventsResponse struct { + Index uint64 + WriteMeta +} + +const ( + NodeEventSubsystemDrain = "Drain" + NodeEventSubsystemDriver = "Driver" + NodeEventSubsystemHeartbeat = "Heartbeat" + NodeEventSubsystemCluster = "Cluster" +) + +// NodeEvent is a single unit representing a node’s state change +type NodeEvent struct { + Message string + Subsystem string + Details map[string]string + Timestamp time.Time + CreateIndex uint64 +} + +func (ne *NodeEvent) String() string { + var details []string + for k, v := range ne.Details { + details = append(details, fmt.Sprintf("%s: %s", k, v)) + } + + return fmt.Sprintf("Message: %s, Subsystem: %s, Details: %s, Timestamp: %s", ne.Message, ne.Subsystem, strings.Join(details, ","), ne.Timestamp.String()) +} + +func (ne *NodeEvent) Copy() *NodeEvent { + c := new(NodeEvent) + *c = *ne + c.Details = helper.CopyMapStringString(ne.Details) + return c +} + +// NewNodeEvent generates a new node event storing the current time as the +// timestamp +func NewNodeEvent() *NodeEvent { + return &NodeEvent{Timestamp: time.Now()} +} + +// SetMessage is used to set the message on the node event +func (ne *NodeEvent) SetMessage(msg string) *NodeEvent { + ne.Message = msg + return ne +} + +// SetSubsystem is used to set the subsystem on the node event +func (ne *NodeEvent) SetSubsystem(sys string) *NodeEvent { + ne.Subsystem = sys + return ne +} + +// SetTimestamp is used to set the timestamp on the node event +func (ne *NodeEvent) SetTimestamp(ts time.Time) *NodeEvent { + ne.Timestamp = ts + return ne +} + +// AddDetail is used to add a detail to the node event +func (ne *NodeEvent) AddDetail(k, v string) *NodeEvent { + if ne.Details == nil { + ne.Details = make(map[string]string, 1) + } + ne.Details[k] = v + return ne +} + +const ( + NodeStatusInit = "initializing" + NodeStatusReady = "ready" + NodeStatusDown = "down" +) + +// ShouldDrainNode checks if a given node status should trigger an +// evaluation. Some states don't require any further action. 
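+//
+// For example, ShouldDrainNode(NodeStatusDown) returns true, while
+// NodeStatusInit and NodeStatusReady return false; any other status panics.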
+func ShouldDrainNode(status string) bool { + switch status { + case NodeStatusInit, NodeStatusReady: + return false + case NodeStatusDown: + return true + default: + panic(fmt.Sprintf("unhandled node status %s", status)) + } +} + +// ValidNodeStatus is used to check if a node status is valid +func ValidNodeStatus(status string) bool { + switch status { + case NodeStatusInit, NodeStatusReady, NodeStatusDown: + return true + default: + return false + } +} + +const ( + // NodeSchedulingEligible and Ineligible marks the node as eligible or not, + // respectively, for receiving allocations. This is orthoginal to the node + // status being ready. + NodeSchedulingEligible = "eligible" + NodeSchedulingIneligible = "ineligible" +) + +// DrainSpec describes a Node's desired drain behavior. +type DrainSpec struct { + // Deadline is the duration after StartTime when the remaining + // allocations on a draining Node should be told to stop. + Deadline time.Duration + + // IgnoreSystemJobs allows systems jobs to remain on the node even though it + // has been marked for draining. + IgnoreSystemJobs bool +} + +// DrainStrategy describes a Node's drain behavior. +type DrainStrategy struct { + // DrainSpec is the user declared drain specification + DrainSpec + + // ForceDeadline is the deadline time for the drain after which drains will + // be forced + ForceDeadline time.Time +} + +func (d *DrainStrategy) Copy() *DrainStrategy { + if d == nil { + return nil + } + + nd := new(DrainStrategy) + *nd = *d + return nd +} + +// DeadlineTime returns a boolean whether the drain strategy allows an infinite +// duration or otherwise the deadline time. The force drain is captured by the +// deadline time being in the past. +func (d *DrainStrategy) DeadlineTime() (infinite bool, deadline time.Time) { + // Treat the nil case as a force drain so during an upgrade where a node may + // not have a drain strategy but has Drain set to true, it is treated as a + // force to mimick old behavior. + if d == nil { + return false, time.Time{} + } + + ns := d.Deadline.Nanoseconds() + switch { + case ns < 0: // Force + return false, time.Time{} + case ns == 0: // Infinite + return true, time.Time{} + default: + return false, d.ForceDeadline + } +} + +func (d *DrainStrategy) Equal(o *DrainStrategy) bool { + if d == nil && o == nil { + return true + } else if o != nil && d == nil { + return false + } else if d != nil && o == nil { + return false + } + + // Compare values + if d.ForceDeadline != o.ForceDeadline { + return false + } else if d.Deadline != o.Deadline { + return false + } else if d.IgnoreSystemJobs != o.IgnoreSystemJobs { + return false + } + + return true +} + +// Node is a representation of a schedulable client node +type Node struct { + // ID is a unique identifier for the node. It can be constructed + // by doing a concatenation of the Name and Datacenter as a simple + // approach. Alternatively a UUID may be used. + ID string + + // SecretID is an ID that is only known by the Node and the set of Servers. + // It is not accessible via the API and is used to authenticate nodes + // conducting privileged activities. + SecretID string + + // Datacenter for this node + Datacenter string + + // Node name + Name string + + // HTTPAddr is the address on which the Nomad client is listening for http + // requests + HTTPAddr string + + // TLSEnabled indicates if the Agent has TLS enabled for the HTTP API + TLSEnabled bool + + // Attributes is an arbitrary set of key/value + // data that can be used for constraints. 
Examples + // include "kernel.name=linux", "arch=386", "driver.docker=1", + // "docker.runtime=1.8.3" + Attributes map[string]string + + // NodeResources captures the available resources on the client. + NodeResources *NodeResources + + // ReservedResources captures the set resources on the client that are + // reserved from scheduling. + ReservedResources *NodeReservedResources + + // Resources is the available resources on the client. + // For example 'cpu=2' 'memory=2048' + // COMPAT(0.10): Remove in 0.10 + Resources *Resources + + // Reserved is the set of resources that are reserved, + // and should be subtracted from the total resources for + // the purposes of scheduling. This may be provide certain + // high-watermark tolerances or because of external schedulers + // consuming resources. + Reserved *Resources + + // Links are used to 'link' this client to external + // systems. For example 'consul=foo.dc1' 'aws=i-83212' + // 'ami=ami-123' + Links map[string]string + + // Meta is used to associate arbitrary metadata with this + // client. This is opaque to Nomad. + Meta map[string]string + + // NodeClass is an opaque identifier used to group nodes + // together for the purpose of determining scheduling pressure. + NodeClass string + + // ComputedClass is a unique id that identifies nodes with a common set of + // attributes and capabilities. + ComputedClass string + + // COMPAT: Remove in Nomad 0.9 + // Drain is controlled by the servers, and not the client. + // If true, no jobs will be scheduled to this node, and existing + // allocations will be drained. Superceded by DrainStrategy in Nomad + // 0.8 but kept for backward compat. + Drain bool + + // DrainStrategy determines the node's draining behavior. Will be nil + // when Drain=false. + DrainStrategy *DrainStrategy + + // SchedulingEligibility determines whether this node will receive new + // placements. 
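+	// Expected values are NodeSchedulingEligible and NodeSchedulingIneligible
+	// (see Canonicalize below).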
+ SchedulingEligibility string + + // Status of this node + Status string + + // StatusDescription is meant to provide more human useful information + StatusDescription string + + // StatusUpdatedAt is the time stamp at which the state of the node was + // updated + StatusUpdatedAt int64 + + // Events is the most recent set of events generated for the node, + // retaining only MaxRetainedNodeEvents number at a time + Events []*NodeEvent + + // Drivers is a map of driver names to current driver information + Drivers map[string]*DriverInfo + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 +} + +// Ready returns true if the node is ready for running allocations +func (n *Node) Ready() bool { + // Drain is checked directly to support pre-0.8 Node data + return n.Status == NodeStatusReady && !n.Drain && n.SchedulingEligibility == NodeSchedulingEligible +} + +func (n *Node) Canonicalize() { + if n == nil { + return + } + + // COMPAT Remove in 0.10 + // In v0.8.0 we introduced scheduling eligibility, so we need to set it for + // upgrading nodes + if n.SchedulingEligibility == "" { + if n.Drain { + n.SchedulingEligibility = NodeSchedulingIneligible + } else { + n.SchedulingEligibility = NodeSchedulingEligible + } + } +} + +func (n *Node) Copy() *Node { + if n == nil { + return nil + } + nn := new(Node) + *nn = *n + nn.Attributes = helper.CopyMapStringString(nn.Attributes) + nn.Resources = nn.Resources.Copy() + nn.Reserved = nn.Reserved.Copy() + nn.NodeResources = nn.NodeResources.Copy() + nn.ReservedResources = nn.ReservedResources.Copy() + nn.Links = helper.CopyMapStringString(nn.Links) + nn.Meta = helper.CopyMapStringString(nn.Meta) + nn.Events = copyNodeEvents(n.Events) + nn.DrainStrategy = nn.DrainStrategy.Copy() + nn.Drivers = copyNodeDrivers(n.Drivers) + return nn +} + +// copyNodeEvents is a helper to copy a list of NodeEvent's +func copyNodeEvents(events []*NodeEvent) []*NodeEvent { + l := len(events) + if l == 0 { + return nil + } + + c := make([]*NodeEvent, l) + for i, event := range events { + c[i] = event.Copy() + } + return c +} + +// copyNodeDrivers is a helper to copy a map of DriverInfo +func copyNodeDrivers(drivers map[string]*DriverInfo) map[string]*DriverInfo { + l := len(drivers) + if l == 0 { + return nil + } + + c := make(map[string]*DriverInfo, l) + for driver, info := range drivers { + c[driver] = info.Copy() + } + return c +} + +// TerminalStatus returns if the current status is terminal and +// will no longer transition. +func (n *Node) TerminalStatus() bool { + switch n.Status { + case NodeStatusDown: + return true + default: + return false + } +} + +// COMPAT(0.11): Remove in 0.11 +// ComparableReservedResources returns the reserved resouces on the node +// handling upgrade paths. Reserved networks must be handled separately. 
After +// 0.11 calls to this should be replaced with: +// node.ReservedResources.Comparable() +func (n *Node) ComparableReservedResources() *ComparableResources { + // See if we can no-op + if n.Reserved == nil && n.ReservedResources == nil { + return nil + } + + // Node already has 0.9+ behavior + if n.ReservedResources != nil { + return n.ReservedResources.Comparable() + } + + // Upgrade path + return &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: int64(n.Reserved.CPU), + }, + Memory: AllocatedMemoryResources{ + MemoryMB: int64(n.Reserved.MemoryMB), + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: int64(n.Reserved.DiskMB), + }, + } +} + +// COMPAT(0.11): Remove in 0.11 +// ComparableResources returns the resouces on the node +// handling upgrade paths. Networking must be handled separately. After 0.11 +// calls to this should be replaced with: node.NodeResources.Comparable() +func (n *Node) ComparableResources() *ComparableResources { + // Node already has 0.9+ behavior + if n.NodeResources != nil { + return n.NodeResources.Comparable() + } + + // Upgrade path + return &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: int64(n.Resources.CPU), + }, + Memory: AllocatedMemoryResources{ + MemoryMB: int64(n.Resources.MemoryMB), + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: int64(n.Resources.DiskMB), + }, + } +} + +// Stub returns a summarized version of the node +func (n *Node) Stub() *NodeListStub { + + addr, _, _ := net.SplitHostPort(n.HTTPAddr) + + return &NodeListStub{ + Address: addr, + ID: n.ID, + Datacenter: n.Datacenter, + Name: n.Name, + NodeClass: n.NodeClass, + Version: n.Attributes["nomad.version"], + Drain: n.Drain, + SchedulingEligibility: n.SchedulingEligibility, + Status: n.Status, + StatusDescription: n.StatusDescription, + Drivers: n.Drivers, + CreateIndex: n.CreateIndex, + ModifyIndex: n.ModifyIndex, + } +} + +// NodeListStub is used to return a subset of job information +// for the job list +type NodeListStub struct { + Address string + ID string + Datacenter string + Name string + NodeClass string + Version string + Drain bool + SchedulingEligibility string + Status string + StatusDescription string + Drivers map[string]*DriverInfo + CreateIndex uint64 + ModifyIndex uint64 +} + +// Resources is used to define the resources available +// on a client +type Resources struct { + CPU int + MemoryMB int + DiskMB int + IOPS int // COMPAT(0.10): Only being used to issue warnings + Networks Networks + Devices []*RequestedDevice +} + +const ( + BytesInMegabyte = 1024 * 1024 +) + +// DefaultResources is a small resources object that contains the +// default resources requests that we will provide to an object. +// --- THIS FUNCTION IS REPLICATED IN api/resources.go and should +// be kept in sync. +func DefaultResources() *Resources { + return &Resources{ + CPU: 100, + MemoryMB: 300, + } +} + +// MinResources is a small resources object that contains the +// absolute minimum resources that we will provide to an object. +// This should not be confused with the defaults which are +// provided in Canonicalize() --- THIS FUNCTION IS REPLICATED IN +// api/resources.go and should be kept in sync. +func MinResources() *Resources { + return &Resources{ + CPU: 20, + MemoryMB: 10, + } +} + +// DiskInBytes returns the amount of disk resources in bytes. 
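+//
+// For example, DiskMB = 300 yields 300 * BytesInMegabyte = 314,572,800 bytes.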
+func (r *Resources) DiskInBytes() int64 { + return int64(r.DiskMB * BytesInMegabyte) +} + +func (r *Resources) Validate() error { + var mErr multierror.Error + if err := r.MeetsMinResources(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + // Ensure the task isn't asking for disk resources + if r.DiskMB > 0 { + mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level.")) + } + + for i, d := range r.Devices { + if err := d.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("device %d failed validation: %v", i+1, err)) + } + } + + return mErr.ErrorOrNil() +} + +// Merge merges this resource with another resource. +func (r *Resources) Merge(other *Resources) { + if other.CPU != 0 { + r.CPU = other.CPU + } + if other.MemoryMB != 0 { + r.MemoryMB = other.MemoryMB + } + if other.DiskMB != 0 { + r.DiskMB = other.DiskMB + } + if len(other.Networks) != 0 { + r.Networks = other.Networks + } + if len(other.Devices) != 0 { + r.Devices = other.Devices + } +} + +func (r *Resources) Canonicalize() { + // Ensure that an empty and nil slices are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(r.Networks) == 0 { + r.Networks = nil + } + if len(r.Devices) == 0 { + r.Devices = nil + } + + for _, n := range r.Networks { + n.Canonicalize() + } +} + +// MeetsMinResources returns an error if the resources specified are less than +// the minimum allowed. +// This is based on the minimums defined in the Resources type +func (r *Resources) MeetsMinResources() error { + var mErr multierror.Error + minResources := MinResources() + if r.CPU < minResources.CPU { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is %d; got %d", minResources.CPU, r.CPU)) + } + if r.MemoryMB < minResources.MemoryMB { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is %d; got %d", minResources.MemoryMB, r.MemoryMB)) + } + for i, n := range r.Networks { + if err := n.MeetsMinResources(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err)) + } + } + + return mErr.ErrorOrNil() +} + +// Copy returns a deep copy of the resources +func (r *Resources) Copy() *Resources { + if r == nil { + return nil + } + newR := new(Resources) + *newR = *r + + // Copy the network objects + if r.Networks != nil { + n := len(r.Networks) + newR.Networks = make([]*NetworkResource, n) + for i := 0; i < n; i++ { + newR.Networks[i] = r.Networks[i].Copy() + } + } + + // Copy the devices + if r.Devices != nil { + n := len(r.Devices) + newR.Devices = make([]*RequestedDevice, n) + for i := 0; i < n; i++ { + newR.Devices[i] = r.Devices[i].Copy() + } + } + + return newR +} + +// NetIndex finds the matching net index using device name +func (r *Resources) NetIndex(n *NetworkResource) int { + return r.Networks.NetIndex(n) +} + +// Superset checks if one set of resources is a superset +// of another. This ignores network resources, and the NetworkIndex +// should be used for that. +func (r *Resources) Superset(other *Resources) (bool, string) { + if r.CPU < other.CPU { + return false, "cpu" + } + if r.MemoryMB < other.MemoryMB { + return false, "memory" + } + if r.DiskMB < other.DiskMB { + return false, "disk" + } + return true, "" +} + +// Add adds the resources of the delta to this, potentially +// returning an error if not possible. 
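+//
+// In its current form CPU, MemoryMB, and DiskMB are summed, each delta network
+// is either merged into the existing entry for the same device (via NetIndex)
+// or appended as a copy, and the returned error is always nil.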
+func (r *Resources) Add(delta *Resources) error { + if delta == nil { + return nil + } + r.CPU += delta.CPU + r.MemoryMB += delta.MemoryMB + r.DiskMB += delta.DiskMB + + for _, n := range delta.Networks { + // Find the matching interface by IP or CIDR + idx := r.NetIndex(n) + if idx == -1 { + r.Networks = append(r.Networks, n.Copy()) + } else { + r.Networks[idx].Add(n) + } + } + return nil +} + +func (r *Resources) GoString() string { + return fmt.Sprintf("*%#v", *r) +} + +type Port struct { + Label string + Value int +} + +// NetworkResource is used to represent available network +// resources +type NetworkResource struct { + Device string // Name of the device + CIDR string // CIDR block of addresses + IP string // Host IP address + MBits int // Throughput + ReservedPorts []Port // Host Reserved ports + DynamicPorts []Port // Host Dynamically assigned ports +} + +func (nr *NetworkResource) Equals(other *NetworkResource) bool { + if nr.Device != other.Device { + return false + } + + if nr.CIDR != other.CIDR { + return false + } + + if nr.IP != other.IP { + return false + } + + if nr.MBits != other.MBits { + return false + } + + if len(nr.ReservedPorts) != len(other.ReservedPorts) { + return false + } + + for i, port := range nr.ReservedPorts { + if len(other.ReservedPorts) <= i { + return false + } + if port != other.ReservedPorts[i] { + return false + } + } + + if len(nr.DynamicPorts) != len(other.DynamicPorts) { + return false + } + for i, port := range nr.DynamicPorts { + if len(other.DynamicPorts) <= i { + return false + } + if port != other.DynamicPorts[i] { + return false + } + } + return true +} + +func (n *NetworkResource) Canonicalize() { + // Ensure that an empty and nil slices are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(n.ReservedPorts) == 0 { + n.ReservedPorts = nil + } + if len(n.DynamicPorts) == 0 { + n.DynamicPorts = nil + } +} + +// MeetsMinResources returns an error if the resources specified are less than +// the minimum allowed. +func (n *NetworkResource) MeetsMinResources() error { + var mErr multierror.Error + if n.MBits < 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits)) + } + return mErr.ErrorOrNil() +} + +// Copy returns a deep copy of the network resource +func (n *NetworkResource) Copy() *NetworkResource { + if n == nil { + return nil + } + newR := new(NetworkResource) + *newR = *n + if n.ReservedPorts != nil { + newR.ReservedPorts = make([]Port, len(n.ReservedPorts)) + copy(newR.ReservedPorts, n.ReservedPorts) + } + if n.DynamicPorts != nil { + newR.DynamicPorts = make([]Port, len(n.DynamicPorts)) + copy(newR.DynamicPorts, n.DynamicPorts) + } + return newR +} + +// Add adds the resources of the delta to this, potentially +// returning an error if not possible. +func (n *NetworkResource) Add(delta *NetworkResource) { + if len(delta.ReservedPorts) > 0 { + n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...) + } + n.MBits += delta.MBits + n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...) +} + +func (n *NetworkResource) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +// PortLabels returns a map of port labels to their assigned host ports. 
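+//
+// Illustrative sketch with made-up labels and ports: ReservedPorts
+// [{Label: "admin", Value: 4646}] and DynamicPorts [{Label: "http", Value: 23456}]
+// yield map[string]int{"admin": 4646, "http": 23456}.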
+func (n *NetworkResource) PortLabels() map[string]int {
+	num := len(n.ReservedPorts) + len(n.DynamicPorts)
+	labelValues := make(map[string]int, num)
+	for _, port := range n.ReservedPorts {
+		labelValues[port.Label] = port.Value
+	}
+	for _, port := range n.DynamicPorts {
+		labelValues[port.Label] = port.Value
+	}
+	return labelValues
+}
+
+// Networks defined for a task on the Resources struct.
+type Networks []*NetworkResource
+
+// Port assignment and IP for the given label or empty values.
+func (ns Networks) Port(label string) (string, int) {
+	for _, n := range ns {
+		for _, p := range n.ReservedPorts {
+			if p.Label == label {
+				return n.IP, p.Value
+			}
+		}
+		for _, p := range n.DynamicPorts {
+			if p.Label == label {
+				return n.IP, p.Value
+			}
+		}
+	}
+	return "", 0
+}
+
+func (ns Networks) NetIndex(n *NetworkResource) int {
+	for idx, net := range ns {
+		if net.Device == n.Device {
+			return idx
+		}
+	}
+	return -1
+}
+
+// RequestedDevice is used to request a device for a task.
+type RequestedDevice struct {
+	// Name is the request name. The possible values are as follows:
+	// * <type>: A single value only specifies the type of request.
+	// * <vendor>/<type>: A single slash delimiter assumes the vendor and type of device is specified.
+	// * <vendor>/<type>/<name>: Two slash delimiters assume vendor, type and specific model are specified.
+	//
+	// Examples are as follows:
+	// * "gpu"
+	// * "nvidia/gpu"
+	// * "nvidia/gpu/GTX2080Ti"
+	Name string
+
+	// Count is the number of requested devices
+	Count uint64
+
+	// Constraints are a set of constraints to apply when selecting the device
+	// to use.
+	Constraints []*Constraint
+
+	// Affinities are a set of affinities to apply when selecting the device
+	// to use.
+	Affinities []*Affinity
+}
+
+func (r *RequestedDevice) Copy() *RequestedDevice {
+	if r == nil {
+		return nil
+	}
+
+	nr := *r
+	nr.Constraints = CopySliceConstraints(nr.Constraints)
+	nr.Affinities = CopySliceAffinities(nr.Affinities)
+
+	return &nr
+}
+
+func (r *RequestedDevice) ID() *DeviceIdTuple {
+	if r == nil || r.Name == "" {
+		return nil
+	}
+
+	parts := strings.SplitN(r.Name, "/", 3)
+	switch len(parts) {
+	case 1:
+		return &DeviceIdTuple{
+			Type: parts[0],
+		}
+	case 2:
+		return &DeviceIdTuple{
+			Vendor: parts[0],
+			Type:   parts[1],
+		}
+	default:
+		return &DeviceIdTuple{
+			Vendor: parts[0],
+			Type:   parts[1],
+			Name:   parts[2],
+		}
+	}
+}
+
+func (r *RequestedDevice) Validate() error {
+	if r == nil {
+		return nil
+	}
+
+	var mErr multierror.Error
+	if r.Name == "" {
+		multierror.Append(&mErr, errors.New("device name must be given as one of the following: type, vendor/type, or vendor/type/name"))
+	}
+
+	for idx, constr := range r.Constraints {
+		// Ensure that the constraint doesn't use an operand we do not allow
+		switch constr.Operand {
+		case ConstraintDistinctHosts, ConstraintDistinctProperty:
+			outer := fmt.Errorf("Constraint %d validation failed: using unsupported operand %q", idx+1, constr.Operand)
+			multierror.Append(&mErr, outer)
+		default:
+			if err := constr.Validate(); err != nil {
+				outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err)
+				multierror.Append(&mErr, outer)
+			}
+		}
+	}
+	for idx, affinity := range r.Affinities {
+		if err := affinity.Validate(); err != nil {
+			outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err)
+			multierror.Append(&mErr, outer)
+		}
+	}
+
+	return mErr.ErrorOrNil()
+}
+
+// NodeResources is used to define the resources available on a client node.
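+//
+// A minimal sketch with placeholder numbers:
+//
+//	nr := &NodeResources{
+//		Cpu:    NodeCpuResources{CpuShares: 4000},
+//		Memory: NodeMemoryResources{MemoryMB: 8192},
+//		Disk:   NodeDiskResources{DiskMB: 50000},
+//	}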
+type NodeResources struct { + Cpu NodeCpuResources + Memory NodeMemoryResources + Disk NodeDiskResources + Networks Networks + Devices []*NodeDeviceResource +} + +func (n *NodeResources) Copy() *NodeResources { + if n == nil { + return nil + } + + newN := new(NodeResources) + *newN = *n + + // Copy the networks + if n.Networks != nil { + networks := len(n.Networks) + newN.Networks = make([]*NetworkResource, networks) + for i := 0; i < networks; i++ { + newN.Networks[i] = n.Networks[i].Copy() + } + } + + // Copy the devices + if n.Devices != nil { + devices := len(n.Devices) + newN.Devices = make([]*NodeDeviceResource, devices) + for i := 0; i < devices; i++ { + newN.Devices[i] = n.Devices[i].Copy() + } + } + + return newN +} + +// Comparable returns a comparable version of the nodes resources. This +// conversion can be lossy so care must be taken when using it. +func (n *NodeResources) Comparable() *ComparableResources { + if n == nil { + return nil + } + + c := &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: n.Cpu.CpuShares, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: n.Memory.MemoryMB, + }, + Networks: n.Networks, + }, + Shared: AllocatedSharedResources{ + DiskMB: n.Disk.DiskMB, + }, + } + return c +} + +func (n *NodeResources) Merge(o *NodeResources) { + if o == nil { + return + } + + n.Cpu.Merge(&o.Cpu) + n.Memory.Merge(&o.Memory) + n.Disk.Merge(&o.Disk) + + if len(o.Networks) != 0 { + n.Networks = o.Networks + } + + if len(o.Devices) != 0 { + n.Devices = o.Devices + } +} + +func (n *NodeResources) Equals(o *NodeResources) bool { + if o == nil && n == nil { + return true + } else if o == nil { + return false + } else if n == nil { + return false + } + + if !n.Cpu.Equals(&o.Cpu) { + return false + } + if !n.Memory.Equals(&o.Memory) { + return false + } + if !n.Disk.Equals(&o.Disk) { + return false + } + + if len(n.Networks) != len(o.Networks) { + return false + } + for i, n := range n.Networks { + if !n.Equals(o.Networks[i]) { + return false + } + } + + // Check the devices + if !DevicesEquals(n.Devices, o.Devices) { + return false + } + + return true +} + +// DevicesEquals returns true if the two device arrays are equal +func DevicesEquals(d1, d2 []*NodeDeviceResource) bool { + if len(d1) != len(d2) { + return false + } + idMap := make(map[DeviceIdTuple]*NodeDeviceResource, len(d1)) + for _, d := range d1 { + idMap[*d.ID()] = d + } + for _, otherD := range d2 { + if d, ok := idMap[*otherD.ID()]; !ok || !d.Equals(otherD) { + return false + } + } + + return true +} + +// NodeCpuResources captures the CPU resources of the node. +type NodeCpuResources struct { + // CpuShares is the CPU shares available. This is calculated by number of + // cores multiplied by the core frequency. 
+ CpuShares int64 +} + +func (n *NodeCpuResources) Merge(o *NodeCpuResources) { + if o == nil { + return + } + + if o.CpuShares != 0 { + n.CpuShares = o.CpuShares + } +} + +func (n *NodeCpuResources) Equals(o *NodeCpuResources) bool { + if o == nil && n == nil { + return true + } else if o == nil { + return false + } else if n == nil { + return false + } + + if n.CpuShares != o.CpuShares { + return false + } + + return true +} + +// NodeMemoryResources captures the memory resources of the node +type NodeMemoryResources struct { + // MemoryMB is the total available memory on the node + MemoryMB int64 +} + +func (n *NodeMemoryResources) Merge(o *NodeMemoryResources) { + if o == nil { + return + } + + if o.MemoryMB != 0 { + n.MemoryMB = o.MemoryMB + } +} + +func (n *NodeMemoryResources) Equals(o *NodeMemoryResources) bool { + if o == nil && n == nil { + return true + } else if o == nil { + return false + } else if n == nil { + return false + } + + if n.MemoryMB != o.MemoryMB { + return false + } + + return true +} + +// NodeDiskResources captures the disk resources of the node +type NodeDiskResources struct { + // DiskMB is the total available disk space on the node + DiskMB int64 +} + +func (n *NodeDiskResources) Merge(o *NodeDiskResources) { + if o == nil { + return + } + if o.DiskMB != 0 { + n.DiskMB = o.DiskMB + } +} + +func (n *NodeDiskResources) Equals(o *NodeDiskResources) bool { + if o == nil && n == nil { + return true + } else if o == nil { + return false + } else if n == nil { + return false + } + + if n.DiskMB != o.DiskMB { + return false + } + + return true +} + +// DeviceIdTuple is the tuple that identifies a device +type DeviceIdTuple struct { + Vendor string + Type string + Name string +} + +func (d *DeviceIdTuple) String() string { + if d == nil { + return "" + } + + return fmt.Sprintf("%s/%s/%s", d.Vendor, d.Type, d.Name) +} + +// Matches returns if this Device ID is a superset of the passed ID. +func (id *DeviceIdTuple) Matches(other *DeviceIdTuple) bool { + if other == nil { + return false + } + + if other.Name != "" && other.Name != id.Name { + return false + } + + if other.Vendor != "" && other.Vendor != id.Vendor { + return false + } + + if other.Type != "" && other.Type != id.Type { + return false + } + + return true +} + +// Equals returns if this Device ID is the same as the passed ID. +func (id *DeviceIdTuple) Equals(o *DeviceIdTuple) bool { + if id == nil && o == nil { + return true + } else if id == nil || o == nil { + return false + } + + return o.Vendor == id.Vendor && o.Type == id.Type && o.Name == id.Name +} + +// NodeDeviceResource captures a set of devices sharing a common +// vendor/type/device_name tuple. 
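+//
+// Hypothetical example: two GPUs of the same model on one node would be
+// grouped as a single entry (instance IDs are placeholders):
+//
+//	&NodeDeviceResource{
+//		Vendor:    "nvidia",
+//		Type:      "gpu",
+//		Name:      "GTX2080Ti",
+//		Instances: []*NodeDevice{{ID: "uuid-0"}, {ID: "uuid-1"}},
+//	}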
+type NodeDeviceResource struct {
+	Vendor     string
+	Type       string
+	Name       string
+	Instances  []*NodeDevice
+	Attributes map[string]*psstructs.Attribute
+}
+
+func (n *NodeDeviceResource) ID() *DeviceIdTuple {
+	if n == nil {
+		return nil
+	}
+
+	return &DeviceIdTuple{
+		Vendor: n.Vendor,
+		Type:   n.Type,
+		Name:   n.Name,
+	}
+}
+
+func (n *NodeDeviceResource) Copy() *NodeDeviceResource {
+	if n == nil {
+		return nil
+	}
+
+	// Copy the primitives
+	nn := *n
+
+	// Copy the device instances
+	if l := len(nn.Instances); l != 0 {
+		nn.Instances = make([]*NodeDevice, 0, l)
+		for _, d := range n.Instances {
+			nn.Instances = append(nn.Instances, d.Copy())
+		}
+	}
+
+	// Copy the Attributes
+	nn.Attributes = psstructs.CopyMapStringAttribute(nn.Attributes)
+
+	return &nn
+}
+
+func (n *NodeDeviceResource) Equals(o *NodeDeviceResource) bool {
+	if o == nil && n == nil {
+		return true
+	} else if o == nil {
+		return false
+	} else if n == nil {
+		return false
+	}
+
+	if n.Vendor != o.Vendor {
+		return false
+	} else if n.Type != o.Type {
+		return false
+	} else if n.Name != o.Name {
+		return false
+	}
+
+	// Check the attributes
+	if len(n.Attributes) != len(o.Attributes) {
+		return false
+	}
+	for k, v := range n.Attributes {
+		if otherV, ok := o.Attributes[k]; !ok || v != otherV {
+			return false
+		}
+	}
+
+	// Check the instances
+	if len(n.Instances) != len(o.Instances) {
+		return false
+	}
+	idMap := make(map[string]*NodeDevice, len(n.Instances))
+	for _, d := range n.Instances {
+		idMap[d.ID] = d
+	}
+	for _, otherD := range o.Instances {
+		if d, ok := idMap[otherD.ID]; !ok || !d.Equals(otherD) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// NodeDevice is an instance of a particular device.
+type NodeDevice struct {
+	// ID is the ID of the device.
+	ID string
+
+	// Healthy captures whether the device is healthy.
+	Healthy bool
+
+	// HealthDescription is used to provide a human readable description of why
+	// the device may be unhealthy.
+	HealthDescription string
+
+	// Locality stores HW locality information for the node to optionally be
+	// used when making placement decisions.
+	Locality *NodeDeviceLocality
+}
+
+func (n *NodeDevice) Equals(o *NodeDevice) bool {
+	if o == nil && n == nil {
+		return true
+	} else if o == nil {
+		return false
+	} else if n == nil {
+		return false
+	}
+
+	if n.ID != o.ID {
+		return false
+	} else if n.Healthy != o.Healthy {
+		return false
+	} else if n.HealthDescription != o.HealthDescription {
+		return false
+	} else if !n.Locality.Equals(o.Locality) {
+		return false
+	}
+
+	return true
+}
+
+func (n *NodeDevice) Copy() *NodeDevice {
+	if n == nil {
+		return nil
+	}
+
+	// Copy the primitives
+	nn := *n
+
+	// Copy the locality
+	nn.Locality = nn.Locality.Copy()
+
+	return &nn
+}
+
+// NodeDeviceLocality stores information about the device's hardware locality on
+// the node.
+type NodeDeviceLocality struct {
+	// PciBusID is the PCI Bus ID for the device.
+	PciBusID string
+}
+
+func (n *NodeDeviceLocality) Equals(o *NodeDeviceLocality) bool {
+	if o == nil && n == nil {
+		return true
+	} else if o == nil {
+		return false
+	} else if n == nil {
+		return false
+	}
+
+	if n.PciBusID != o.PciBusID {
+		return false
+	}
+
+	return true
+}
+
+func (n *NodeDeviceLocality) Copy() *NodeDeviceLocality {
+	if n == nil {
+		return nil
+	}
+
+	// Copy the primitives
+	nn := *n
+	return &nn
+}
+
+// NodeReservedResources is used to capture the resources on a client node that
+// should be reserved and not made available to jobs.
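+//
+// A rough sketch with placeholder values, reserving CPU, memory, and a port
+// range for the host itself:
+//
+//	reserved := &NodeReservedResources{
+//		Cpu:      NodeReservedCpuResources{CpuShares: 500},
+//		Memory:   NodeReservedMemoryResources{MemoryMB: 512},
+//		Networks: NodeReservedNetworkResources{ReservedHostPorts: "22,80,8000-8100"},
+//	}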
+type NodeReservedResources struct { + Cpu NodeReservedCpuResources + Memory NodeReservedMemoryResources + Disk NodeReservedDiskResources + Networks NodeReservedNetworkResources +} + +func (n *NodeReservedResources) Copy() *NodeReservedResources { + if n == nil { + return nil + } + newN := new(NodeReservedResources) + *newN = *n + return newN +} + +// Comparable returns a comparable version of the node's reserved resources. The +// returned resources doesn't contain any network information. This conversion +// can be lossy so care must be taken when using it. +func (n *NodeReservedResources) Comparable() *ComparableResources { + if n == nil { + return nil + } + + c := &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: n.Cpu.CpuShares, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: n.Memory.MemoryMB, + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: n.Disk.DiskMB, + }, + } + return c +} + +// NodeReservedCpuResources captures the reserved CPU resources of the node. +type NodeReservedCpuResources struct { + CpuShares int64 +} + +// NodeReservedMemoryResources captures the reserved memory resources of the node. +type NodeReservedMemoryResources struct { + MemoryMB int64 +} + +// NodeReservedDiskResources captures the reserved disk resources of the node. +type NodeReservedDiskResources struct { + DiskMB int64 +} + +// NodeReservedNetworkResources captures the reserved network resources of the node. +type NodeReservedNetworkResources struct { + // ReservedHostPorts is the set of ports reserved on all host network + // interfaces. Its format is a comma separate list of integers or integer + // ranges. (80,443,1000-2000,2005) + ReservedHostPorts string +} + +// ParsePortHostPorts returns the reserved host ports. +func (n *NodeReservedNetworkResources) ParseReservedHostPorts() ([]uint64, error) { + return ParsePortRanges(n.ReservedHostPorts) +} + +// AllocatedResources is the set of resources to be used by an allocation. +type AllocatedResources struct { + // Tasks is a mapping of task name to the resources for the task. + Tasks map[string]*AllocatedTaskResources + + // Shared is the set of resource that are shared by all tasks in the group. + Shared AllocatedSharedResources +} + +func (a *AllocatedResources) Copy() *AllocatedResources { + if a == nil { + return nil + } + newA := new(AllocatedResources) + *newA = *a + + if a.Tasks != nil { + tr := make(map[string]*AllocatedTaskResources, len(newA.Tasks)) + for task, resource := range newA.Tasks { + tr[task] = resource.Copy() + } + newA.Tasks = tr + } + + return newA +} + +// Comparable returns a comparable version of the allocations allocated +// resources. This conversion can be lossy so care must be taken when using it. +func (a *AllocatedResources) Comparable() *ComparableResources { + if a == nil { + return nil + } + + c := &ComparableResources{ + Shared: a.Shared, + } + for _, r := range a.Tasks { + c.Flattened.Add(r) + } + return c +} + +// OldTaskResources returns the pre-0.9.0 map of task resources +func (a *AllocatedResources) OldTaskResources() map[string]*Resources { + m := make(map[string]*Resources, len(a.Tasks)) + for name, res := range a.Tasks { + m[name] = &Resources{ + CPU: int(res.Cpu.CpuShares), + MemoryMB: int(res.Memory.MemoryMB), + Networks: res.Networks, + } + } + + return m +} + +// AllocatedTaskResources are the set of resources allocated to a task. 
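+//
+// For illustration, with placeholder values:
+//
+//	tr := &AllocatedTaskResources{
+//		Cpu:    AllocatedCpuResources{CpuShares: 500},
+//		Memory: AllocatedMemoryResources{MemoryMB: 256},
+//	}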
+type AllocatedTaskResources struct { + Cpu AllocatedCpuResources + Memory AllocatedMemoryResources + Networks Networks + Devices []*AllocatedDeviceResource +} + +func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources { + if a == nil { + return nil + } + newA := new(AllocatedTaskResources) + *newA = *a + + // Copy the networks + if a.Networks != nil { + n := len(a.Networks) + newA.Networks = make([]*NetworkResource, n) + for i := 0; i < n; i++ { + newA.Networks[i] = a.Networks[i].Copy() + } + } + + // Copy the devices + if newA.Devices != nil { + n := len(a.Devices) + newA.Devices = make([]*AllocatedDeviceResource, n) + for i := 0; i < n; i++ { + newA.Devices[i] = a.Devices[i].Copy() + } + } + + return newA +} + +// NetIndex finds the matching net index using device name +func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int { + return a.Networks.NetIndex(n) +} + +func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) { + if delta == nil { + return + } + + a.Cpu.Add(&delta.Cpu) + a.Memory.Add(&delta.Memory) + + for _, n := range delta.Networks { + // Find the matching interface by IP or CIDR + idx := a.NetIndex(n) + if idx == -1 { + a.Networks = append(a.Networks, n.Copy()) + } else { + a.Networks[idx].Add(n) + } + } + + for _, d := range delta.Devices { + // Find the matching device + idx := AllocatedDevices(a.Devices).Index(d) + if idx == -1 { + a.Devices = append(a.Devices, d.Copy()) + } else { + a.Devices[idx].Add(d) + } + } +} + +// Comparable turns AllocatedTaskResources into ComparableResources +// as a helper step in preemption +func (a *AllocatedTaskResources) Comparable() *ComparableResources { + ret := &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: a.Cpu.CpuShares, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: a.Memory.MemoryMB, + }, + }, + } + if len(a.Networks) > 0 { + for _, net := range a.Networks { + ret.Flattened.Networks = append(ret.Flattened.Networks, net) + } + } + return ret +} + +// Subtract only subtracts CPU and Memory resources. Network utilization +// is managed separately in NetworkIndex +func (a *AllocatedTaskResources) Subtract(delta *AllocatedTaskResources) { + if delta == nil { + return + } + + a.Cpu.Subtract(&delta.Cpu) + a.Memory.Subtract(&delta.Memory) +} + +// AllocatedSharedResources are the set of resources allocated to a task group. +type AllocatedSharedResources struct { + DiskMB int64 +} + +func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) { + if delta == nil { + return + } + + a.DiskMB += delta.DiskMB +} + +func (a *AllocatedSharedResources) Subtract(delta *AllocatedSharedResources) { + if delta == nil { + return + } + + a.DiskMB -= delta.DiskMB +} + +// AllocatedCpuResources captures the allocated CPU resources. +type AllocatedCpuResources struct { + CpuShares int64 +} + +func (a *AllocatedCpuResources) Add(delta *AllocatedCpuResources) { + if delta == nil { + return + } + + a.CpuShares += delta.CpuShares +} + +func (a *AllocatedCpuResources) Subtract(delta *AllocatedCpuResources) { + if delta == nil { + return + } + + a.CpuShares -= delta.CpuShares +} + +// AllocatedMemoryResources captures the allocated memory resources. 
+type AllocatedMemoryResources struct { + MemoryMB int64 +} + +func (a *AllocatedMemoryResources) Add(delta *AllocatedMemoryResources) { + if delta == nil { + return + } + + a.MemoryMB += delta.MemoryMB +} + +func (a *AllocatedMemoryResources) Subtract(delta *AllocatedMemoryResources) { + if delta == nil { + return + } + + a.MemoryMB -= delta.MemoryMB +} + +type AllocatedDevices []*AllocatedDeviceResource + +// Index finds the matching index using the passed device. If not found, -1 is +// returned. +func (a AllocatedDevices) Index(d *AllocatedDeviceResource) int { + if d == nil { + return -1 + } + + for i, o := range a { + if o.ID().Equals(d.ID()) { + return i + } + } + + return -1 +} + +// AllocatedDeviceResource captures a set of allocated devices. +type AllocatedDeviceResource struct { + // Vendor, Type, and Name are used to select the plugin to request the + // device IDs from. + Vendor string + Type string + Name string + + // DeviceIDs is the set of allocated devices + DeviceIDs []string +} + +func (a *AllocatedDeviceResource) ID() *DeviceIdTuple { + if a == nil { + return nil + } + + return &DeviceIdTuple{ + Vendor: a.Vendor, + Type: a.Type, + Name: a.Name, + } +} + +func (a *AllocatedDeviceResource) Add(delta *AllocatedDeviceResource) { + if delta == nil { + return + } + + a.DeviceIDs = append(a.DeviceIDs, delta.DeviceIDs...) +} + +func (a *AllocatedDeviceResource) Copy() *AllocatedDeviceResource { + if a == nil { + return a + } + + na := *a + + // Copy the devices + na.DeviceIDs = make([]string, len(a.DeviceIDs)) + for i, id := range a.DeviceIDs { + na.DeviceIDs[i] = id + } + + return &na +} + +// ComparableResources is the set of resources allocated to a task group but +// not keyed by Task, making it easier to compare. +type ComparableResources struct { + Flattened AllocatedTaskResources + Shared AllocatedSharedResources +} + +func (c *ComparableResources) Add(delta *ComparableResources) { + if delta == nil { + return + } + + c.Flattened.Add(&delta.Flattened) + c.Shared.Add(&delta.Shared) +} + +func (c *ComparableResources) Subtract(delta *ComparableResources) { + if delta == nil { + return + } + + c.Flattened.Subtract(&delta.Flattened) + c.Shared.Subtract(&delta.Shared) +} + +func (c *ComparableResources) Copy() *ComparableResources { + if c == nil { + return nil + } + newR := new(ComparableResources) + *newR = *c + return newR +} + +// Superset checks if one set of resources is a superset of another. This +// ignores network resources, and the NetworkIndex should be used for that. +func (c *ComparableResources) Superset(other *ComparableResources) (bool, string) { + if c.Flattened.Cpu.CpuShares < other.Flattened.Cpu.CpuShares { + return false, "cpu" + } + if c.Flattened.Memory.MemoryMB < other.Flattened.Memory.MemoryMB { + return false, "memory" + } + if c.Shared.DiskMB < other.Shared.DiskMB { + return false, "disk" + } + return true, "" +} + +// allocated finds the matching net index using device name +func (c *ComparableResources) NetIndex(n *NetworkResource) int { + return c.Flattened.Networks.NetIndex(n) +} + +const ( + // JobTypeNomad is reserved for internal system tasks and is + // always handled by the CoreScheduler. 
+ JobTypeCore = "_core" + JobTypeService = "service" + JobTypeBatch = "batch" + JobTypeSystem = "system" +) + +const ( + JobStatusPending = "pending" // Pending means the job is waiting on scheduling + JobStatusRunning = "running" // Running means the job has non-terminal allocations + JobStatusDead = "dead" // Dead means all evaluation's and allocations are terminal +) + +const ( + // JobMinPriority is the minimum allowed priority + JobMinPriority = 1 + + // JobDefaultPriority is the default priority if not + // not specified. + JobDefaultPriority = 50 + + // JobMaxPriority is the maximum allowed priority + JobMaxPriority = 100 + + // Ensure CoreJobPriority is higher than any user + // specified job so that it gets priority. This is important + // for the system to remain healthy. + CoreJobPriority = JobMaxPriority * 2 + + // JobTrackedVersions is the number of historic job versions that are + // kept. + JobTrackedVersions = 6 +) + +// Job is the scope of a scheduling request to Nomad. It is the largest +// scoped object, and is a named collection of task groups. Each task group +// is further composed of tasks. A task group (TG) is the unit of scheduling +// however. +type Job struct { + // Stop marks whether the user has stopped the job. A stopped job will + // have all created allocations stopped and acts as a way to stop a job + // without purging it from the system. This allows existing allocs to be + // queried and the job to be inspected as it is being killed. + Stop bool + + // Region is the Nomad region that handles scheduling this job + Region string + + // Namespace is the namespace the job is submitted into. + Namespace string + + // ID is a unique identifier for the job per region. It can be + // specified hierarchically like LineOfBiz/OrgName/Team/Project + ID string + + // ParentID is the unique identifier of the job that spawned this job. + ParentID string + + // Name is the logical name of the job used to refer to it. This is unique + // per region, but not unique globally. + Name string + + // Type is used to control various behaviors about the job. Most jobs + // are service jobs, meaning they are expected to be long lived. + // Some jobs are batch oriented meaning they run and then terminate. + // This can be extended in the future to support custom schedulers. + Type string + + // Priority is used to control scheduling importance and if this job + // can preempt other jobs. + Priority int + + // AllAtOnce is used to control if incremental scheduling of task groups + // is allowed or if we must do a gang scheduling of the entire job. This + // can slow down larger jobs if resources are not available. + AllAtOnce bool + + // Datacenters contains all the datacenters this job is allowed to span + Datacenters []string + + // Constraints can be specified at a job level and apply to + // all the task groups and tasks. + Constraints []*Constraint + + // Affinities can be specified at the job level to express + // scheduling preferences that apply to all groups and tasks + Affinities []*Affinity + + // Spread can be specified at the job level to express spreading + // allocations across a desired attribute, such as datacenter + Spreads []*Spread + + // TaskGroups are the collections of task groups that this job needs + // to run. Each task group is an atomic unit of scheduling and placement. + TaskGroups []*TaskGroup + + // COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0. + Update UpdateStrategy + + // Periodic is used to define the interval the job is run at. 
+ Periodic *PeriodicConfig + + // ParameterizedJob is used to specify the job as a parameterized job + // for dispatching. + ParameterizedJob *ParameterizedJobConfig + + // Dispatched is used to identify if the Job has been dispatched from a + // parameterized job. + Dispatched bool + + // Payload is the payload supplied when the job was dispatched. + Payload []byte + + // Meta is used to associate arbitrary metadata with this + // job. This is opaque to Nomad. + Meta map[string]string + + // VaultToken is the Vault token that proves the submitter of the job has + // access to the specified Vault policies. This field is only used to + // transfer the token and is not stored after Job submission. + VaultToken string + + // Job status + Status string + + // StatusDescription is meant to provide more human useful information + StatusDescription string + + // Stable marks a job as stable. Stability is only defined on "service" and + // "system" jobs. The stability of a job will be set automatically as part + // of a deployment and can be manually set via APIs. + Stable bool + + // Version is a monotonically increasing version number that is incremented + // on each job register. + Version uint64 + + // SubmitTime is the time at which the job was submitted as a UnixNano in + // UTC + SubmitTime int64 + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 + JobModifyIndex uint64 +} + +// NamespacedID returns the namespaced id useful for logging +func (j *Job) NamespacedID() *NamespacedID { + return &NamespacedID{ + ID: j.ID, + Namespace: j.Namespace, + } +} + +// Canonicalize is used to canonicalize fields in the Job. This should be called +// when registering a Job. A set of warnings are returned if the job was changed +// in anyway that the user should be made aware of. +func (j *Job) Canonicalize() (warnings error) { + if j == nil { + return nil + } + + var mErr multierror.Error + // Ensure that an empty and nil map are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(j.Meta) == 0 { + j.Meta = nil + } + + // Ensure the job is in a namespace. + if j.Namespace == "" { + j.Namespace = DefaultNamespace + } + + for _, tg := range j.TaskGroups { + tg.Canonicalize(j) + } + + if j.ParameterizedJob != nil { + j.ParameterizedJob.Canonicalize() + } + + if j.Periodic != nil { + j.Periodic.Canonicalize() + } + + return mErr.ErrorOrNil() +} + +// Copy returns a deep copy of the Job. It is expected that callers use recover. +// This job can panic if the deep copy failed as it uses reflection. 
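+//
+// A guarded call might therefore look like (sketch only):
+//
+//	defer func() {
+//		if r := recover(); r != nil {
+//			// handle the failed copy
+//		}
+//	}()
+//	copied := job.Copy()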
+func (j *Job) Copy() *Job { + if j == nil { + return nil + } + nj := new(Job) + *nj = *j + nj.Datacenters = helper.CopySliceString(nj.Datacenters) + nj.Constraints = CopySliceConstraints(nj.Constraints) + nj.Affinities = CopySliceAffinities(nj.Affinities) + + if j.TaskGroups != nil { + tgs := make([]*TaskGroup, len(nj.TaskGroups)) + for i, tg := range nj.TaskGroups { + tgs[i] = tg.Copy() + } + nj.TaskGroups = tgs + } + + nj.Periodic = nj.Periodic.Copy() + nj.Meta = helper.CopyMapStringString(nj.Meta) + nj.ParameterizedJob = nj.ParameterizedJob.Copy() + return nj +} + +// Validate is used to sanity check a job input +func (j *Job) Validate() error { + var mErr multierror.Error + + if j.Region == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job region")) + } + if j.ID == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job ID")) + } else if strings.Contains(j.ID, " ") { + mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space")) + } + if j.Name == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job name")) + } + if j.Namespace == "" { + mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace")) + } + switch j.Type { + case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem: + case "": + mErr.Errors = append(mErr.Errors, errors.New("Missing job type")) + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type)) + } + if j.Priority < JobMinPriority || j.Priority > JobMaxPriority { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority)) + } + if len(j.Datacenters) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters")) + } + if len(j.TaskGroups) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups")) + } + for idx, constr := range j.Constraints { + if err := constr.Validate(); err != nil { + outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + if j.Type == JobTypeSystem { + if j.Affinities != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + } + } else { + for idx, affinity := range j.Affinities { + if err := affinity.Validate(); err != nil { + outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + } + + if j.Type == JobTypeSystem { + if j.Spreads != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza")) + } + } else { + for idx, spread := range j.Spreads { + if err := spread.Validate(); err != nil { + outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + } + + // Check for duplicate task groups + taskGroups := make(map[string]int) + for idx, tg := range j.TaskGroups { + if tg.Name == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1)) + } else if existing, ok := taskGroups[tg.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1)) + } else { + taskGroups[tg.Name] = idx + } + + if j.Type == "system" && tg.Count > 1 { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Job task group %s has count %d. 
Count cannot exceed 1 with system scheduler", + tg.Name, tg.Count)) + } + } + + // Validate the task group + for _, tg := range j.TaskGroups { + if err := tg.Validate(j); err != nil { + outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + // Validate periodic is only used with batch jobs. + if j.IsPeriodic() && j.Periodic.Enabled { + if j.Type != JobTypeBatch { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch)) + } + + if err := j.Periodic.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + if j.IsParameterized() { + if j.Type != JobTypeBatch { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch)) + } + + if err := j.ParameterizedJob.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + return mErr.ErrorOrNil() +} + +// Warnings returns a list of warnings that may be from dubious settings or +// deprecation warnings. +func (j *Job) Warnings() error { + var mErr multierror.Error + + // Check the groups + for _, tg := range j.TaskGroups { + if err := tg.Warnings(j); err != nil { + outer := fmt.Errorf("Group %q has warnings: %v", tg.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + return mErr.ErrorOrNil() +} + +// LookupTaskGroup finds a task group by name +func (j *Job) LookupTaskGroup(name string) *TaskGroup { + for _, tg := range j.TaskGroups { + if tg.Name == name { + return tg + } + } + return nil +} + +// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined +// meta data for the task. When joining Job, Group and Task Meta, the precedence +// is by deepest scope (Task > Group > Job). +func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string { + group := j.LookupTaskGroup(groupName) + if group == nil { + return nil + } + + task := group.LookupTask(taskName) + if task == nil { + return nil + } + + meta := helper.CopyMapStringString(task.Meta) + if meta == nil { + meta = make(map[string]string, len(group.Meta)+len(j.Meta)) + } + + // Add the group specific meta + for k, v := range group.Meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + + // Add the job specific meta + for k, v := range j.Meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + + return meta +} + +// Stopped returns if a job is stopped. +func (j *Job) Stopped() bool { + return j == nil || j.Stop +} + +// HasUpdateStrategy returns if any task group in the job has an update strategy +func (j *Job) HasUpdateStrategy() bool { + for _, tg := range j.TaskGroups { + if tg.Update != nil { + return true + } + } + + return false +} + +// Stub is used to return a summary of the job +func (j *Job) Stub(summary *JobSummary) *JobListStub { + return &JobListStub{ + ID: j.ID, + ParentID: j.ParentID, + Name: j.Name, + Datacenters: j.Datacenters, + Type: j.Type, + Priority: j.Priority, + Periodic: j.IsPeriodic(), + ParameterizedJob: j.IsParameterized(), + Stop: j.Stop, + Status: j.Status, + StatusDescription: j.StatusDescription, + CreateIndex: j.CreateIndex, + ModifyIndex: j.ModifyIndex, + JobModifyIndex: j.JobModifyIndex, + SubmitTime: j.SubmitTime, + JobSummary: summary, + } +} + +// IsPeriodic returns whether a job is periodic. 
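+// A minimal usage sketch (illustrative only; "job" is assumed to be a *Job):
+//
+//	if job.IsPeriodicActive() {
+//		next, _ := job.Periodic.Next(time.Now().In(job.Periodic.GetLocation()))
+//		fmt.Println("next launch:", next)
+//	}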
+func (j *Job) IsPeriodic() bool { + return j.Periodic != nil +} + +// IsPeriodicActive returns whether the job is an active periodic job that will +// create child jobs +func (j *Job) IsPeriodicActive() bool { + return j.IsPeriodic() && j.Periodic.Enabled && !j.Stopped() && !j.IsParameterized() +} + +// IsParameterized returns whether a job is parameterized job. +func (j *Job) IsParameterized() bool { + return j.ParameterizedJob != nil && !j.Dispatched +} + +// VaultPolicies returns the set of Vault policies per task group, per task +func (j *Job) VaultPolicies() map[string]map[string]*Vault { + policies := make(map[string]map[string]*Vault, len(j.TaskGroups)) + + for _, tg := range j.TaskGroups { + tgPolicies := make(map[string]*Vault, len(tg.Tasks)) + + for _, task := range tg.Tasks { + if task.Vault == nil { + continue + } + + tgPolicies[task.Name] = task.Vault + } + + if len(tgPolicies) != 0 { + policies[tg.Name] = tgPolicies + } + } + + return policies +} + +// RequiredSignals returns a mapping of task groups to tasks to their required +// set of signals +func (j *Job) RequiredSignals() map[string]map[string][]string { + signals := make(map[string]map[string][]string) + + for _, tg := range j.TaskGroups { + for _, task := range tg.Tasks { + // Use this local one as a set + taskSignals := make(map[string]struct{}) + + // Check if the Vault change mode uses signals + if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal { + taskSignals[task.Vault.ChangeSignal] = struct{}{} + } + + // If a user has specified a KillSignal, add it to required signals + if task.KillSignal != "" { + taskSignals[task.KillSignal] = struct{}{} + } + + // Check if any template change mode uses signals + for _, t := range task.Templates { + if t.ChangeMode != TemplateChangeModeSignal { + continue + } + + taskSignals[t.ChangeSignal] = struct{}{} + } + + // Flatten and sort the signals + l := len(taskSignals) + if l == 0 { + continue + } + + flat := make([]string, 0, l) + for sig := range taskSignals { + flat = append(flat, sig) + } + + sort.Strings(flat) + tgSignals, ok := signals[tg.Name] + if !ok { + tgSignals = make(map[string][]string) + signals[tg.Name] = tgSignals + } + tgSignals[task.Name] = flat + } + + } + + return signals +} + +// SpecChanged determines if the functional specification has changed between +// two job versions. 
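+// Server-managed fields (status, stability, version, Raft indexes and submit
+// time) are copied from the existing job onto a copy of the new one before
+// the reflect.DeepEqual comparison, so only user-visible specification
+// changes are reported.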
+func (j *Job) SpecChanged(new *Job) bool { + if j == nil { + return new != nil + } + + // Create a copy of the new job + c := new.Copy() + + // Update the new job so we can do a reflect + c.Status = j.Status + c.StatusDescription = j.StatusDescription + c.Stable = j.Stable + c.Version = j.Version + c.CreateIndex = j.CreateIndex + c.ModifyIndex = j.ModifyIndex + c.JobModifyIndex = j.JobModifyIndex + c.SubmitTime = j.SubmitTime + + // Deep equals the jobs + return !reflect.DeepEqual(j, c) +} + +func (j *Job) SetSubmitTime() { + j.SubmitTime = time.Now().UTC().UnixNano() +} + +// JobListStub is used to return a subset of job information +// for the job list +type JobListStub struct { + ID string + ParentID string + Name string + Datacenters []string + Type string + Priority int + Periodic bool + ParameterizedJob bool + Stop bool + Status string + StatusDescription string + JobSummary *JobSummary + CreateIndex uint64 + ModifyIndex uint64 + JobModifyIndex uint64 + SubmitTime int64 +} + +// JobSummary summarizes the state of the allocations of a job +type JobSummary struct { + // JobID is the ID of the job the summary is for + JobID string + + // Namespace is the namespace of the job and its summary + Namespace string + + // Summary contains the summary per task group for the Job + Summary map[string]TaskGroupSummary + + // Children contains a summary for the children of this job. + Children *JobChildrenSummary + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 +} + +// Copy returns a new copy of JobSummary +func (js *JobSummary) Copy() *JobSummary { + newJobSummary := new(JobSummary) + *newJobSummary = *js + newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary)) + for k, v := range js.Summary { + newTGSummary[k] = v + } + newJobSummary.Summary = newTGSummary + newJobSummary.Children = newJobSummary.Children.Copy() + return newJobSummary +} + +// JobChildrenSummary contains the summary of children job statuses +type JobChildrenSummary struct { + Pending int64 + Running int64 + Dead int64 +} + +// Copy returns a new copy of a JobChildrenSummary +func (jc *JobChildrenSummary) Copy() *JobChildrenSummary { + if jc == nil { + return nil + } + + njc := new(JobChildrenSummary) + *njc = *jc + return njc +} + +// TaskGroup summarizes the state of all the allocations of a particular +// TaskGroup +type TaskGroupSummary struct { + Queued int + Complete int + Failed int + Running int + Starting int + Lost int +} + +const ( + // Checks uses any registered health check state in combination with task + // states to determine if a allocation is healthy. + UpdateStrategyHealthCheck_Checks = "checks" + + // TaskStates uses the task states of an allocation to determine if the + // allocation is healthy. + UpdateStrategyHealthCheck_TaskStates = "task_states" + + // Manual allows the operator to manually signal to Nomad when an + // allocations is healthy. This allows more advanced health checking that is + // outside of the scope of Nomad. + UpdateStrategyHealthCheck_Manual = "manual" +) + +var ( + // DefaultUpdateStrategy provides a baseline that can be used to upgrade + // jobs with the old policy or for populating field defaults. 
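+	// Callers that need different values should work on a copy rather than
+	// mutate the shared default, for example (sketch only):
+	//
+	//	u := DefaultUpdateStrategy.Copy()
+	//	u.MaxParallel = 2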
+ DefaultUpdateStrategy = &UpdateStrategy{ + Stagger: 30 * time.Second, + MaxParallel: 1, + HealthCheck: UpdateStrategyHealthCheck_Checks, + MinHealthyTime: 10 * time.Second, + HealthyDeadline: 5 * time.Minute, + ProgressDeadline: 10 * time.Minute, + AutoRevert: false, + Canary: 0, + } +) + +// UpdateStrategy is used to modify how updates are done +type UpdateStrategy struct { + // Stagger is used to determine the rate at which allocations are migrated + // due to down or draining nodes. + Stagger time.Duration + + // MaxParallel is how many updates can be done in parallel + MaxParallel int + + // HealthCheck specifies the mechanism in which allocations are marked + // healthy or unhealthy as part of a deployment. + HealthCheck string + + // MinHealthyTime is the minimum time an allocation must be in the healthy + // state before it is marked as healthy, unblocking more allocations to be + // rolled. + MinHealthyTime time.Duration + + // HealthyDeadline is the time in which an allocation must be marked as + // healthy before it is automatically transitioned to unhealthy. This time + // period doesn't count against the MinHealthyTime. + HealthyDeadline time.Duration + + // ProgressDeadline is the time in which an allocation as part of the + // deployment must transition to healthy. If no allocation becomes healthy + // after the deadline, the deployment is marked as failed. If the deadline + // is zero, the first failure causes the deployment to fail. + ProgressDeadline time.Duration + + // AutoRevert declares that if a deployment fails because of unhealthy + // allocations, there should be an attempt to auto-revert the job to a + // stable version. + AutoRevert bool + + // Canary is the number of canaries to deploy when a change to the task + // group is detected. 
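+	// A value of zero disables canary deployments for the group.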
+ Canary int +} + +func (u *UpdateStrategy) Copy() *UpdateStrategy { + if u == nil { + return nil + } + + copy := new(UpdateStrategy) + *copy = *u + return copy +} + +func (u *UpdateStrategy) Validate() error { + if u == nil { + return nil + } + + var mErr multierror.Error + switch u.HealthCheck { + case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual: + default: + multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck)) + } + + if u.MaxParallel < 1 { + multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than one: %d < 1", u.MaxParallel)) + } + if u.Canary < 0 { + multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary)) + } + if u.MinHealthyTime < 0 { + multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime)) + } + if u.HealthyDeadline <= 0 { + multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline)) + } + if u.ProgressDeadline < 0 { + multierror.Append(&mErr, fmt.Errorf("Progress deadline must be zero or greater: %v", u.ProgressDeadline)) + } + if u.MinHealthyTime >= u.HealthyDeadline { + multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline)) + } + if u.ProgressDeadline != 0 && u.HealthyDeadline >= u.ProgressDeadline { + multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be less than progress deadline: %v > %v", u.HealthyDeadline, u.ProgressDeadline)) + } + if u.Stagger <= 0 { + multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger)) + } + + return mErr.ErrorOrNil() +} + +// TODO(alexdadgar): Remove once no longer used by the scheduler. +// Rolling returns if a rolling strategy should be used +func (u *UpdateStrategy) Rolling() bool { + return u.Stagger > 0 && u.MaxParallel > 0 +} + +const ( + // PeriodicSpecCron is used for a cron spec. + PeriodicSpecCron = "cron" + + // PeriodicSpecTest is only used by unit tests. It is a sorted, comma + // separated list of unix timestamps at which to launch. + PeriodicSpecTest = "_internal_test" +) + +// Periodic defines the interval a job should be run at. +type PeriodicConfig struct { + // Enabled determines if the job should be run periodically. + Enabled bool + + // Spec specifies the interval the job should be run as. It is parsed based + // on the SpecType. + Spec string + + // SpecType defines the format of the spec. + SpecType string + + // ProhibitOverlap enforces that spawned jobs do not run in parallel. + ProhibitOverlap bool + + // TimeZone is the user specified string that determines the time zone to + // launch against. The time zones must be specified from IANA Time Zone + // database, such as "America/New_York". 
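+	// An empty value is treated as UTC (see GetLocation).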
+ // Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + // Reference: https://www.iana.org/time-zones + TimeZone string + + // location is the time zone to evaluate the launch time against + location *time.Location +} + +func (p *PeriodicConfig) Copy() *PeriodicConfig { + if p == nil { + return nil + } + np := new(PeriodicConfig) + *np = *p + return np +} + +func (p *PeriodicConfig) Validate() error { + if !p.Enabled { + return nil + } + + var mErr multierror.Error + if p.Spec == "" { + multierror.Append(&mErr, fmt.Errorf("Must specify a spec")) + } + + // Check if we got a valid time zone + if p.TimeZone != "" { + if _, err := time.LoadLocation(p.TimeZone); err != nil { + multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err)) + } + } + + switch p.SpecType { + case PeriodicSpecCron: + // Validate the cron spec + if _, err := cronexpr.Parse(p.Spec); err != nil { + multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err)) + } + case PeriodicSpecTest: + // No-op + default: + multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType)) + } + + return mErr.ErrorOrNil() +} + +func (p *PeriodicConfig) Canonicalize() { + // Load the location + l, err := time.LoadLocation(p.TimeZone) + if err != nil { + p.location = time.UTC + } + + p.location = l +} + +// CronParseNext is a helper that parses the next time for the given expression +// but captures any panic that may occur in the underlying library. +func CronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) { + defer func() { + if recover() != nil { + t = time.Time{} + err = fmt.Errorf("failed parsing cron expression: %q", spec) + } + }() + + return e.Next(fromTime), nil +} + +// Next returns the closest time instant matching the spec that is after the +// passed time. If no matching instance exists, the zero value of time.Time is +// returned. The `time.Location` of the returned value matches that of the +// passed time. +func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) { + switch p.SpecType { + case PeriodicSpecCron: + if e, err := cronexpr.Parse(p.Spec); err == nil { + return CronParseNext(e, fromTime, p.Spec) + } + case PeriodicSpecTest: + split := strings.Split(p.Spec, ",") + if len(split) == 1 && split[0] == "" { + return time.Time{}, nil + } + + // Parse the times + times := make([]time.Time, len(split)) + for i, s := range split { + unix, err := strconv.Atoi(s) + if err != nil { + return time.Time{}, nil + } + + times[i] = time.Unix(int64(unix), 0) + } + + // Find the next match + for _, next := range times { + if fromTime.Before(next) { + return next, nil + } + } + } + + return time.Time{}, nil +} + +// GetLocation returns the location to use for determining the time zone to run +// the periodic job against. +func (p *PeriodicConfig) GetLocation() *time.Location { + // Jobs pre 0.5.5 will not have this + if p.location != nil { + return p.location + } + + return time.UTC +} + +const ( + // PeriodicLaunchSuffix is the string appended to the periodic jobs ID + // when launching derived instances of it. + PeriodicLaunchSuffix = "/periodic-" +) + +// PeriodicLaunch tracks the last launch time of a periodic job. +type PeriodicLaunch struct { + ID string // ID of the periodic job. + Namespace string // Namespace of the periodic job + Launch time.Time // The last launch time. 
+ + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 +} + +const ( + DispatchPayloadForbidden = "forbidden" + DispatchPayloadOptional = "optional" + DispatchPayloadRequired = "required" + + // DispatchLaunchSuffix is the string appended to the parameterized job's ID + // when dispatching instances of it. + DispatchLaunchSuffix = "/dispatch-" +) + +// ParameterizedJobConfig is used to configure the parameterized job +type ParameterizedJobConfig struct { + // Payload configure the payload requirements + Payload string + + // MetaRequired is metadata keys that must be specified by the dispatcher + MetaRequired []string + + // MetaOptional is metadata keys that may be specified by the dispatcher + MetaOptional []string +} + +func (d *ParameterizedJobConfig) Validate() error { + var mErr multierror.Error + switch d.Payload { + case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden: + default: + multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload)) + } + + // Check that the meta configurations are disjoint sets + disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional) + if !disjoint { + multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending)) + } + + return mErr.ErrorOrNil() +} + +func (d *ParameterizedJobConfig) Canonicalize() { + if d.Payload == "" { + d.Payload = DispatchPayloadOptional + } +} + +func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig { + if d == nil { + return nil + } + nd := new(ParameterizedJobConfig) + *nd = *d + nd.MetaOptional = helper.CopySliceString(nd.MetaOptional) + nd.MetaRequired = helper.CopySliceString(nd.MetaRequired) + return nd +} + +// DispatchedID returns an ID appropriate for a job dispatched against a +// particular parameterized job +func DispatchedID(templateID string, t time.Time) string { + u := uuid.Generate()[:8] + return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u) +} + +// DispatchPayloadConfig configures how a task gets its input from a job dispatch +type DispatchPayloadConfig struct { + // File specifies a relative path to where the input data should be written + File string +} + +func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig { + if d == nil { + return nil + } + nd := new(DispatchPayloadConfig) + *nd = *d + return nd +} + +func (d *DispatchPayloadConfig) Validate() error { + // Verify the destination doesn't escape + escaped, err := PathEscapesAllocDir("task/local/", d.File) + if err != nil { + return fmt.Errorf("invalid destination path: %v", err) + } else if escaped { + return fmt.Errorf("destination escapes allocation directory") + } + + return nil +} + +var ( + // These default restart policies needs to be in sync with + // Canonicalize in api/tasks.go + + DefaultServiceJobRestartPolicy = RestartPolicy{ + Delay: 15 * time.Second, + Attempts: 2, + Interval: 30 * time.Minute, + Mode: RestartPolicyModeFail, + } + DefaultBatchJobRestartPolicy = RestartPolicy{ + Delay: 15 * time.Second, + Attempts: 3, + Interval: 24 * time.Hour, + Mode: RestartPolicyModeFail, + } +) + +var ( + // These default reschedule policies needs to be in sync with + // NewDefaultReschedulePolicy in api/tasks.go + + DefaultServiceJobReschedulePolicy = ReschedulePolicy{ + Delay: 30 * time.Second, + DelayFunction: "exponential", + MaxDelay: 1 * time.Hour, + Unlimited: true, + } + DefaultBatchJobReschedulePolicy = ReschedulePolicy{ + Attempts: 1, + Interval: 24 * 
time.Hour, + Delay: 5 * time.Second, + DelayFunction: "constant", + } +) + +const ( + // RestartPolicyModeDelay causes an artificial delay till the next interval is + // reached when the specified attempts have been reached in the interval. + RestartPolicyModeDelay = "delay" + + // RestartPolicyModeFail causes a job to fail if the specified number of + // attempts are reached within an interval. + RestartPolicyModeFail = "fail" + + // RestartPolicyMinInterval is the minimum interval that is accepted for a + // restart policy. + RestartPolicyMinInterval = 5 * time.Second + + // ReasonWithinPolicy describes restart events that are within policy + ReasonWithinPolicy = "Restart within policy" +) + +// RestartPolicy configures how Tasks are restarted when they crash or fail. +type RestartPolicy struct { + // Attempts is the number of restart that will occur in an interval. + Attempts int + + // Interval is a duration in which we can limit the number of restarts + // within. + Interval time.Duration + + // Delay is the time between a failure and a restart. + Delay time.Duration + + // Mode controls what happens when the task restarts more than attempt times + // in an interval. + Mode string +} + +func (r *RestartPolicy) Copy() *RestartPolicy { + if r == nil { + return nil + } + nrp := new(RestartPolicy) + *nrp = *r + return nrp +} + +func (r *RestartPolicy) Validate() error { + var mErr multierror.Error + switch r.Mode { + case RestartPolicyModeDelay, RestartPolicyModeFail: + default: + multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode)) + } + + // Check for ambiguous/confusing settings + if r.Attempts == 0 && r.Mode != RestartPolicyModeFail { + multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts)) + } + + if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() { + multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval)) + } + if time.Duration(r.Attempts)*r.Delay > r.Interval { + multierror.Append(&mErr, + fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay)) + } + return mErr.ErrorOrNil() +} + +func NewRestartPolicy(jobType string) *RestartPolicy { + switch jobType { + case JobTypeService, JobTypeSystem: + rp := DefaultServiceJobRestartPolicy + return &rp + case JobTypeBatch: + rp := DefaultBatchJobRestartPolicy + return &rp + } + return nil +} + +const ReschedulePolicyMinInterval = 15 * time.Second +const ReschedulePolicyMinDelay = 5 * time.Second + +var RescheduleDelayFunctions = [...]string{"constant", "exponential", "fibonacci"} + +// ReschedulePolicy configures how Tasks are rescheduled when they crash or fail. +type ReschedulePolicy struct { + // Attempts limits the number of rescheduling attempts that can occur in an interval. + Attempts int + + // Interval is a duration in which we can limit the number of reschedule attempts. + Interval time.Duration + + // Delay is a minimum duration to wait between reschedule attempts. + // The delay function determines how much subsequent reschedule attempts are delayed by. + Delay time.Duration + + // DelayFunction determines how the delay progressively changes on subsequent reschedule + // attempts. Valid values are "exponential", "constant", and "fibonacci". + DelayFunction string + + // MaxDelay is an upper bound on the delay. + MaxDelay time.Duration + + // Unlimited allows infinite rescheduling attempts. 
Only allowed when delay is set + // between reschedule attempts. + Unlimited bool +} + +func (r *ReschedulePolicy) Copy() *ReschedulePolicy { + if r == nil { + return nil + } + nrp := new(ReschedulePolicy) + *nrp = *r + return nrp +} + +func (r *ReschedulePolicy) Enabled() bool { + enabled := r != nil && (r.Attempts > 0 || r.Unlimited) + return enabled +} + +// Validate uses different criteria to validate the reschedule policy +// Delay must be a minimum of 5 seconds +// Delay Ceiling is ignored if Delay Function is "constant" +// Number of possible attempts is validated, given the interval, delay and delay function +func (r *ReschedulePolicy) Validate() error { + if !r.Enabled() { + return nil + } + var mErr multierror.Error + // Check for ambiguous/confusing settings + if r.Attempts > 0 { + if r.Interval <= 0 { + multierror.Append(&mErr, fmt.Errorf("Interval must be a non zero value if Attempts > 0")) + } + if r.Unlimited { + multierror.Append(&mErr, fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, "+ + "and Unlimited = %v is ambiguous", r.Attempts, r.Interval, r.Unlimited)) + multierror.Append(&mErr, errors.New("If Attempts >0, Unlimited cannot also be set to true")) + } + } + + delayPreCheck := true + // Delay should be bigger than the default + if r.Delay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() { + multierror.Append(&mErr, fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay)) + delayPreCheck = false + } + + // Must use a valid delay function + if !isValidDelayFunction(r.DelayFunction) { + multierror.Append(&mErr, fmt.Errorf("Invalid delay function %q, must be one of %q", r.DelayFunction, RescheduleDelayFunctions)) + delayPreCheck = false + } + + // Validate MaxDelay if not using linear delay progression + if r.DelayFunction != "constant" { + if r.MaxDelay.Nanoseconds() < ReschedulePolicyMinDelay.Nanoseconds() { + multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, r.Delay)) + delayPreCheck = false + } + if r.MaxDelay < r.Delay { + multierror.Append(&mErr, fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", r.Delay, r.MaxDelay)) + delayPreCheck = false + } + + } + + // Validate Interval and other delay parameters if attempts are limited + if !r.Unlimited { + if r.Interval.Nanoseconds() < ReschedulePolicyMinInterval.Nanoseconds() { + multierror.Append(&mErr, fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, r.Interval)) + } + if !delayPreCheck { + // We can't cross validate the rest of the delay params if delayPreCheck fails, so return early + return mErr.ErrorOrNil() + } + crossValidationErr := r.validateDelayParams() + if crossValidationErr != nil { + multierror.Append(&mErr, crossValidationErr) + } + } + return mErr.ErrorOrNil() +} + +func isValidDelayFunction(delayFunc string) bool { + for _, value := range RescheduleDelayFunctions { + if value == delayFunc { + return true + } + } + return false +} + +func (r *ReschedulePolicy) validateDelayParams() error { + ok, possibleAttempts, recommendedInterval := r.viableAttempts() + if ok { + return nil + } + var mErr multierror.Error + if r.DelayFunction == "constant" { + multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and "+ + "delay function %q", possibleAttempts, r.Interval, r.Delay, r.DelayFunction)) + } else { + multierror.Append(&mErr, fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ + "delay 
function %q, and delay ceiling %v", possibleAttempts, r.Interval, r.Delay, r.DelayFunction, r.MaxDelay)) + } + multierror.Append(&mErr, fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", recommendedInterval.Round(time.Second), r.Attempts)) + return mErr.ErrorOrNil() +} + +func (r *ReschedulePolicy) viableAttempts() (bool, int, time.Duration) { + var possibleAttempts int + var recommendedInterval time.Duration + valid := true + switch r.DelayFunction { + case "constant": + recommendedInterval = time.Duration(r.Attempts) * r.Delay + if r.Interval < recommendedInterval { + possibleAttempts = int(r.Interval / r.Delay) + valid = false + } + case "exponential": + for i := 0; i < r.Attempts; i++ { + nextDelay := time.Duration(math.Pow(2, float64(i))) * r.Delay + if nextDelay > r.MaxDelay { + nextDelay = r.MaxDelay + recommendedInterval += nextDelay + } else { + recommendedInterval = nextDelay + } + if recommendedInterval < r.Interval { + possibleAttempts++ + } + } + if possibleAttempts < r.Attempts { + valid = false + } + case "fibonacci": + var slots []time.Duration + slots = append(slots, r.Delay) + slots = append(slots, r.Delay) + reachedCeiling := false + for i := 2; i < r.Attempts; i++ { + var nextDelay time.Duration + if reachedCeiling { + //switch to linear + nextDelay = slots[i-1] + r.MaxDelay + } else { + nextDelay = slots[i-1] + slots[i-2] + if nextDelay > r.MaxDelay { + nextDelay = r.MaxDelay + reachedCeiling = true + } + } + slots = append(slots, nextDelay) + } + recommendedInterval = slots[len(slots)-1] + if r.Interval < recommendedInterval { + valid = false + // calculate possible attempts + for i := 0; i < len(slots); i++ { + if slots[i] > r.Interval { + possibleAttempts = i + break + } + } + } + default: + return false, 0, 0 + } + if possibleAttempts < 0 { // can happen if delay is bigger than interval + possibleAttempts = 0 + } + return valid, possibleAttempts, recommendedInterval +} + +func NewReschedulePolicy(jobType string) *ReschedulePolicy { + switch jobType { + case JobTypeService: + rp := DefaultServiceJobReschedulePolicy + return &rp + case JobTypeBatch: + rp := DefaultBatchJobReschedulePolicy + return &rp + } + return nil +} + +const ( + MigrateStrategyHealthChecks = "checks" + MigrateStrategyHealthStates = "task_states" +) + +type MigrateStrategy struct { + MaxParallel int + HealthCheck string + MinHealthyTime time.Duration + HealthyDeadline time.Duration +} + +// DefaultMigrateStrategy is used for backwards compat with pre-0.8 Allocations +// that lack an update strategy. 
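+// With these defaults, allocations are migrated one at a time and judged
+// healthy via their registered health checks.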
+// +// This function should match its counterpart in api/tasks.go +func DefaultMigrateStrategy() *MigrateStrategy { + return &MigrateStrategy{ + MaxParallel: 1, + HealthCheck: MigrateStrategyHealthChecks, + MinHealthyTime: 10 * time.Second, + HealthyDeadline: 5 * time.Minute, + } +} + +func (m *MigrateStrategy) Validate() error { + var mErr multierror.Error + + if m.MaxParallel < 0 { + multierror.Append(&mErr, fmt.Errorf("MaxParallel must be >= 0 but found %d", m.MaxParallel)) + } + + switch m.HealthCheck { + case MigrateStrategyHealthChecks, MigrateStrategyHealthStates: + // ok + case "": + if m.MaxParallel > 0 { + multierror.Append(&mErr, fmt.Errorf("Missing HealthCheck")) + } + default: + multierror.Append(&mErr, fmt.Errorf("Invalid HealthCheck: %q", m.HealthCheck)) + } + + if m.MinHealthyTime < 0 { + multierror.Append(&mErr, fmt.Errorf("MinHealthyTime is %s and must be >= 0", m.MinHealthyTime)) + } + + if m.HealthyDeadline < 0 { + multierror.Append(&mErr, fmt.Errorf("HealthyDeadline is %s and must be >= 0", m.HealthyDeadline)) + } + + if m.MinHealthyTime > m.HealthyDeadline { + multierror.Append(&mErr, fmt.Errorf("MinHealthyTime must be less than HealthyDeadline")) + } + + return mErr.ErrorOrNil() +} + +// TaskGroup is an atomic unit of placement. Each task group belongs to +// a job and may contain any number of tasks. A task group support running +// in many replicas using the same configuration.. +type TaskGroup struct { + // Name of the task group + Name string + + // Count is the number of replicas of this task group that should + // be scheduled. + Count int + + // Update is used to control the update strategy for this task group + Update *UpdateStrategy + + // Migrate is used to control the migration strategy for this task group + Migrate *MigrateStrategy + + // Constraints can be specified at a task group level and apply to + // all the tasks contained. + Constraints []*Constraint + + //RestartPolicy of a TaskGroup + RestartPolicy *RestartPolicy + + // Tasks are the collection of tasks that this task group needs to run + Tasks []*Task + + // EphemeralDisk is the disk resources that the task group requests + EphemeralDisk *EphemeralDisk + + // Meta is used to associate arbitrary metadata with this + // task group. This is opaque to Nomad. + Meta map[string]string + + // ReschedulePolicy is used to configure how the scheduler should + // retry failed allocations. + ReschedulePolicy *ReschedulePolicy + + // Affinities can be specified at the task group level to express + // scheduling preferences. + Affinities []*Affinity + + // Spread can be specified at the task group level to express spreading + // allocations across a desired attribute, such as datacenter + Spreads []*Spread +} + +func (tg *TaskGroup) Copy() *TaskGroup { + if tg == nil { + return nil + } + ntg := new(TaskGroup) + *ntg = *tg + ntg.Update = ntg.Update.Copy() + ntg.Constraints = CopySliceConstraints(ntg.Constraints) + ntg.RestartPolicy = ntg.RestartPolicy.Copy() + ntg.ReschedulePolicy = ntg.ReschedulePolicy.Copy() + ntg.Affinities = CopySliceAffinities(ntg.Affinities) + ntg.Spreads = CopySliceSpreads(ntg.Spreads) + + if tg.Tasks != nil { + tasks := make([]*Task, len(ntg.Tasks)) + for i, t := range ntg.Tasks { + tasks[i] = t.Copy() + } + ntg.Tasks = tasks + } + + ntg.Meta = helper.CopyMapStringString(ntg.Meta) + + if tg.EphemeralDisk != nil { + ntg.EphemeralDisk = tg.EphemeralDisk.Copy() + } + return ntg +} + +// Canonicalize is used to canonicalize fields in the TaskGroup. 
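+// It applies job-type defaults for the restart, reschedule, migrate and
+// ephemeral disk blocks and then canonicalizes each task.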
+func (tg *TaskGroup) Canonicalize(job *Job) { + // Ensure that an empty and nil map are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(tg.Meta) == 0 { + tg.Meta = nil + } + + // Set the default restart policy. + if tg.RestartPolicy == nil { + tg.RestartPolicy = NewRestartPolicy(job.Type) + } + + if tg.ReschedulePolicy == nil { + tg.ReschedulePolicy = NewReschedulePolicy(job.Type) + } + + // Canonicalize Migrate for service jobs + if job.Type == JobTypeService && tg.Migrate == nil { + tg.Migrate = DefaultMigrateStrategy() + } + + // Set a default ephemeral disk object if the user has not requested for one + if tg.EphemeralDisk == nil { + tg.EphemeralDisk = DefaultEphemeralDisk() + } + + for _, task := range tg.Tasks { + task.Canonicalize(job, tg) + } +} + +// Validate is used to sanity check a task group +func (tg *TaskGroup) Validate(j *Job) error { + var mErr multierror.Error + if tg.Name == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing task group name")) + } + if tg.Count < 0 { + mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative")) + } + if len(tg.Tasks) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group")) + } + for idx, constr := range tg.Constraints { + if err := constr.Validate(); err != nil { + outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + if j.Type == JobTypeSystem { + if tg.Affinities != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + } + } else { + for idx, affinity := range tg.Affinities { + if err := affinity.Validate(); err != nil { + outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + } + + if tg.RestartPolicy != nil { + if err := tg.RestartPolicy.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } else { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name)) + } + + if j.Type == JobTypeSystem { + if tg.Spreads != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread stanza")) + } + } else { + for idx, spread := range tg.Spreads { + if err := spread.Validate(); err != nil { + outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + } + + if j.Type == JobTypeSystem { + if tg.ReschedulePolicy != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs should not have a reschedule policy")) + } + } else { + if tg.ReschedulePolicy != nil { + if err := tg.ReschedulePolicy.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } else { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a reschedule policy", tg.Name)) + } + } + + if tg.EphemeralDisk != nil { + if err := tg.EphemeralDisk.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } else { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name)) + } + + // Validate the update strategy + if u := tg.Update; u != nil { + switch j.Type { + case JobTypeService, JobTypeSystem: + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type)) + } + if err := u.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + // Validate the migration strategy + switch 
j.Type { + case JobTypeService: + if tg.Migrate != nil { + if err := tg.Migrate.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + default: + if tg.Migrate != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow migrate block", j.Type)) + } + } + + // Check for duplicate tasks, that there is only leader task if any, + // and no duplicated static ports + tasks := make(map[string]int) + staticPorts := make(map[int]string) + leaderTasks := 0 + for idx, task := range tg.Tasks { + if task.Name == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1)) + } else if existing, ok := tasks[task.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1)) + } else { + tasks[task.Name] = idx + } + + if task.Leader { + leaderTasks++ + } + + if task.Resources == nil { + continue + } + + for _, net := range task.Resources.Networks { + for _, port := range net.ReservedPorts { + if other, ok := staticPorts[port.Value]; ok { + err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other) + mErr.Errors = append(mErr.Errors, err) + } else { + staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label) + } + } + } + } + + if leaderTasks > 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader")) + } + + // Validate the tasks + for _, task := range tg.Tasks { + if err := task.Validate(tg.EphemeralDisk, j.Type); err != nil { + outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + return mErr.ErrorOrNil() +} + +// Warnings returns a list of warnings that may be from dubious settings or +// deprecation warnings. +func (tg *TaskGroup) Warnings(j *Job) error { + var mErr multierror.Error + + // Validate the update strategy + if u := tg.Update; u != nil { + // Check the counts are appropriate + if u.MaxParallel > tg.Count { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+ + "A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count)) + } + } + + for _, t := range tg.Tasks { + if err := t.Warnings(); err != nil { + err = multierror.Prefix(err, fmt.Sprintf("Task %q:", t.Name)) + mErr.Errors = append(mErr.Errors, err) + } + } + + return mErr.ErrorOrNil() +} + +// LookupTask finds a task by name +func (tg *TaskGroup) LookupTask(name string) *Task { + for _, t := range tg.Tasks { + if t.Name == name { + return t + } + } + return nil +} + +func (tg *TaskGroup) GoString() string { + return fmt.Sprintf("*%#v", *tg) +} + +// CheckRestart describes if and when a task should be restarted based on +// failing health checks. 
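+// A nil CheckRestart, or a Limit of zero, disables restart-on-unhealthy
+// behavior (see TriggersRestarts).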
+type CheckRestart struct { + Limit int // Restart task after this many unhealthy intervals + Grace time.Duration // Grace time to give tasks after starting to get healthy + IgnoreWarnings bool // If true treat checks in `warning` as passing +} + +func (c *CheckRestart) Copy() *CheckRestart { + if c == nil { + return nil + } + + nc := new(CheckRestart) + *nc = *c + return nc +} + +func (c *CheckRestart) Validate() error { + if c == nil { + return nil + } + + var mErr multierror.Error + if c.Limit < 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit)) + } + + if c.Grace < 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace)) + } + + return mErr.ErrorOrNil() +} + +const ( + ServiceCheckHTTP = "http" + ServiceCheckTCP = "tcp" + ServiceCheckScript = "script" + ServiceCheckGRPC = "grpc" + + // minCheckInterval is the minimum check interval permitted. Consul + // currently has its MinInterval set to 1s. Mirror that here for + // consistency. + minCheckInterval = 1 * time.Second + + // minCheckTimeout is the minimum check timeout permitted for Consul + // script TTL checks. + minCheckTimeout = 1 * time.Second +) + +// The ServiceCheck data model represents the consul health check that +// Nomad registers for a Task +type ServiceCheck struct { + Name string // Name of the check, defaults to id + Type string // Type of the check - tcp, http, docker and script + Command string // Command is the command to run for script checks + Args []string // Args is a list of arguments for script checks + Path string // path of the health check url for http type check + Protocol string // Protocol to use if check is http, defaults to http + PortLabel string // The port to use for tcp/http checks + AddressMode string // 'host' to use host ip:port or 'driver' to use driver's + Interval time.Duration // Interval of the check + Timeout time.Duration // Timeout of the response from the check before consul fails the check + InitialStatus string // Initial status of the check + TLSSkipVerify bool // Skip TLS verification when Protocol=https + Method string // HTTP Method to use (GET by default) + Header map[string][]string // HTTP Headers for Consul to set when making HTTP checks + CheckRestart *CheckRestart // If and when a task should be restarted based on checks + GRPCService string // Service for GRPC checks + GRPCUseTLS bool // Whether or not to use TLS for GRPC checks +} + +func (sc *ServiceCheck) Copy() *ServiceCheck { + if sc == nil { + return nil + } + nsc := new(ServiceCheck) + *nsc = *sc + nsc.Args = helper.CopySliceString(sc.Args) + nsc.Header = helper.CopyMapStringSliceString(sc.Header) + nsc.CheckRestart = sc.CheckRestart.Copy() + return nsc +} + +func (sc *ServiceCheck) Canonicalize(serviceName string) { + // Ensure empty maps/slices are treated as null to avoid scheduling + // issues when using DeepEquals. 
+ if len(sc.Args) == 0 { + sc.Args = nil + } + + if len(sc.Header) == 0 { + sc.Header = nil + } else { + for k, v := range sc.Header { + if len(v) == 0 { + sc.Header[k] = nil + } + } + } + + if sc.Name == "" { + sc.Name = fmt.Sprintf("service: %q check", serviceName) + } +} + +// validate a Service's ServiceCheck +func (sc *ServiceCheck) validate() error { + // Validate Type + switch strings.ToLower(sc.Type) { + case ServiceCheckGRPC: + case ServiceCheckTCP: + case ServiceCheckHTTP: + if sc.Path == "" { + return fmt.Errorf("http type must have a valid http path") + } + url, err := url.Parse(sc.Path) + if err != nil { + return fmt.Errorf("http type must have a valid http path") + } + if url.IsAbs() { + return fmt.Errorf("http type must have a relative http path") + } + + case ServiceCheckScript: + if sc.Command == "" { + return fmt.Errorf("script type must have a valid script path") + } + + default: + return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type) + } + + // Validate interval and timeout + if sc.Interval == 0 { + return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval) + } else if sc.Interval < minCheckInterval { + return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval) + } + + if sc.Timeout == 0 { + return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckInterval) + } else if sc.Timeout < minCheckTimeout { + return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckInterval) + } + + // Validate InitialStatus + switch sc.InitialStatus { + case "": + case api.HealthPassing: + case api.HealthWarning: + case api.HealthCritical: + default: + return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical) + + } + + // Validate AddressMode + switch sc.AddressMode { + case "", AddressModeHost, AddressModeDriver: + // Ok + case AddressModeAuto: + return fmt.Errorf("invalid address_mode %q - %s only valid for services", sc.AddressMode, AddressModeAuto) + default: + return fmt.Errorf("invalid address_mode %q", sc.AddressMode) + } + + return sc.CheckRestart.Validate() +} + +// RequiresPort returns whether the service check requires the task has a port. +func (sc *ServiceCheck) RequiresPort() bool { + switch sc.Type { + case ServiceCheckGRPC, ServiceCheckHTTP, ServiceCheckTCP: + return true + default: + return false + } +} + +// TriggersRestarts returns true if this check should be watched and trigger a restart +// on failure. +func (sc *ServiceCheck) TriggersRestarts() bool { + return sc.CheckRestart != nil && sc.CheckRestart.Limit > 0 +} + +// Hash all ServiceCheck fields and the check's corresponding service ID to +// create an identifier. The identifier is not guaranteed to be unique as if +// the PortLabel is blank, the Service's PortLabel will be used after Hash is +// called. 
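+// Checks are hashed independently of their parent Service, whose Hash method
+// excludes them.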
+func (sc *ServiceCheck) Hash(serviceID string) string { + h := sha1.New() + io.WriteString(h, serviceID) + io.WriteString(h, sc.Name) + io.WriteString(h, sc.Type) + io.WriteString(h, sc.Command) + io.WriteString(h, strings.Join(sc.Args, "")) + io.WriteString(h, sc.Path) + io.WriteString(h, sc.Protocol) + io.WriteString(h, sc.PortLabel) + io.WriteString(h, sc.Interval.String()) + io.WriteString(h, sc.Timeout.String()) + io.WriteString(h, sc.Method) + // Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6 + if sc.TLSSkipVerify { + io.WriteString(h, "true") + } + + // Since map iteration order isn't stable we need to write k/v pairs to + // a slice and sort it before hashing. + if len(sc.Header) > 0 { + headers := make([]string, 0, len(sc.Header)) + for k, v := range sc.Header { + headers = append(headers, k+strings.Join(v, "")) + } + sort.Strings(headers) + io.WriteString(h, strings.Join(headers, "")) + } + + // Only include AddressMode if set to maintain ID stability with Nomad <0.7.1 + if len(sc.AddressMode) > 0 { + io.WriteString(h, sc.AddressMode) + } + + // Only include GRPC if set to maintain ID stability with Nomad <0.8.4 + if sc.GRPCService != "" { + io.WriteString(h, sc.GRPCService) + } + if sc.GRPCUseTLS { + io.WriteString(h, "true") + } + + return fmt.Sprintf("%x", h.Sum(nil)) +} + +const ( + AddressModeAuto = "auto" + AddressModeHost = "host" + AddressModeDriver = "driver" +) + +// Service represents a Consul service definition in Nomad +type Service struct { + // Name of the service registered with Consul. Consul defaults the + // Name to ServiceID if not specified. The Name if specified is used + // as one of the seed values when generating a Consul ServiceID. + Name string + + // PortLabel is either the numeric port number or the `host:port`. + // To specify the port number using the host's Consul Advertise + // address, specify an empty host in the PortLabel (e.g. `:port`). + PortLabel string + + // AddressMode specifies whether or not to use the host ip:port for + // this service. + AddressMode string + + Tags []string // List of tags for the service + CanaryTags []string // List of tags for the service when it is a canary + Checks []*ServiceCheck // List of checks associated with the service +} + +func (s *Service) Copy() *Service { + if s == nil { + return nil + } + ns := new(Service) + *ns = *s + ns.Tags = helper.CopySliceString(ns.Tags) + ns.CanaryTags = helper.CopySliceString(ns.CanaryTags) + + if s.Checks != nil { + checks := make([]*ServiceCheck, len(ns.Checks)) + for i, c := range ns.Checks { + checks[i] = c.Copy() + } + ns.Checks = checks + } + + return ns +} + +// Canonicalize interpolates values of Job, Task Group and Task in the Service +// Name. This also generates check names, service id and check ids. 
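+// For example (illustrative values), with job "web", group "cache" and task
+// "redis", a service named "${BASE}-db" canonicalizes to "web-cache-redis-db".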
+func (s *Service) Canonicalize(job string, taskGroup string, task string) { + // Ensure empty lists are treated as null to avoid scheduler issues when + // using DeepEquals + if len(s.Tags) == 0 { + s.Tags = nil + } + if len(s.CanaryTags) == 0 { + s.CanaryTags = nil + } + if len(s.Checks) == 0 { + s.Checks = nil + } + + s.Name = args.ReplaceEnv(s.Name, map[string]string{ + "JOB": job, + "TASKGROUP": taskGroup, + "TASK": task, + "BASE": fmt.Sprintf("%s-%s-%s", job, taskGroup, task), + }, + ) + + for _, check := range s.Checks { + check.Canonicalize(s.Name) + } +} + +// Validate checks if the Check definition is valid +func (s *Service) Validate() error { + var mErr multierror.Error + + // Ensure the service name is valid per the below RFCs but make an exception + // for our interpolation syntax by first stripping any environment variables from the name + + serviceNameStripped := args.ReplaceEnvWithPlaceHolder(s.Name, "ENV-VAR") + + if err := s.ValidateName(serviceNameStripped); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes: %q", s.Name)) + } + + switch s.AddressMode { + case "", AddressModeAuto, AddressModeHost, AddressModeDriver: + // OK + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode)) + } + + for _, c := range s.Checks { + if s.PortLabel == "" && c.PortLabel == "" && c.RequiresPort() { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but neither check nor service %+q have a port", c.Name, s.Name)) + continue + } + + if err := c.validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", c.Name, err)) + } + } + + return mErr.ErrorOrNil() +} + +// ValidateName checks if the services Name is valid and should be called after +// the name has been interpolated +func (s *Service) ValidateName(name string) error { + // Ensure the service name is valid per RFC-952 §1 + // (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1 + // (https://tools.ietf.org/html/rfc1123), and RFC-2782 + // (https://tools.ietf.org/html/rfc2782). + re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`) + if !re.MatchString(name) { + return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name) + } + return nil +} + +// Hash returns a base32 encoded hash of a Service's contents excluding checks +// as they're hashed independently. +func (s *Service) Hash(allocID, taskName string, canary bool) string { + h := sha1.New() + io.WriteString(h, allocID) + io.WriteString(h, taskName) + io.WriteString(h, s.Name) + io.WriteString(h, s.PortLabel) + io.WriteString(h, s.AddressMode) + for _, tag := range s.Tags { + io.WriteString(h, tag) + } + for _, tag := range s.CanaryTags { + io.WriteString(h, tag) + } + + // Vary ID on whether or not CanaryTags will be used + if canary { + h.Write([]byte("Canary")) + } + + // Base32 is used for encoding the hash as sha1 hashes can always be + // encoded without padding, only 4 bytes larger than base64, and saves + // 8 bytes vs hex. Since these hashes are used in Consul URLs it's nice + // to have a reasonably compact URL-safe representation. 
+ return b32.EncodeToString(h.Sum(nil)) +} + +const ( + // DefaultKillTimeout is the default timeout between signaling a task it + // will be killed and killing it. + DefaultKillTimeout = 5 * time.Second +) + +// LogConfig provides configuration for log rotation +type LogConfig struct { + MaxFiles int + MaxFileSizeMB int +} + +// DefaultLogConfig returns the default LogConfig values. +func DefaultLogConfig() *LogConfig { + return &LogConfig{ + MaxFiles: 10, + MaxFileSizeMB: 10, + } +} + +// Validate returns an error if the log config specified are less than +// the minimum allowed. +func (l *LogConfig) Validate() error { + var mErr multierror.Error + if l.MaxFiles < 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles)) + } + if l.MaxFileSizeMB < 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB)) + } + return mErr.ErrorOrNil() +} + +// Task is a single process typically that is executed as part of a task group. +type Task struct { + // Name of the task + Name string + + // Driver is used to control which driver is used + Driver string + + // User is used to determine which user will run the task. It defaults to + // the same user the Nomad client is being run as. + User string + + // Config is provided to the driver to initialize + Config map[string]interface{} + + // Map of environment variables to be used by the driver + Env map[string]string + + // List of service definitions exposed by the Task + Services []*Service + + // Vault is used to define the set of Vault policies that this task should + // have access to. + Vault *Vault + + // Templates are the set of templates to be rendered for the task. + Templates []*Template + + // Constraints can be specified at a task level and apply only to + // the particular task. + Constraints []*Constraint + + // Affinities can be specified at the task level to express + // scheduling preferences + Affinities []*Affinity + + // Resources is the resources needed by this task + Resources *Resources + + // DispatchPayload configures how the task retrieves its input from a dispatch + DispatchPayload *DispatchPayloadConfig + + // Meta is used to associate arbitrary metadata with this + // task. This is opaque to Nomad. + Meta map[string]string + + // KillTimeout is the time between signaling a task that it will be + // killed and killing it. + KillTimeout time.Duration + + // LogConfig provides configuration for log rotation + LogConfig *LogConfig + + // Artifacts is a list of artifacts to download and extract before running + // the task. + Artifacts []*TaskArtifact + + // Leader marks the task as the leader within the group. When the leader + // task exits, other tasks will be gracefully terminated. + Leader bool + + // ShutdownDelay is the duration of the delay between deregistering a + // task from Consul and sending it a signal to shutdown. See #2441 + ShutdownDelay time.Duration + + // The kill signal to use for the task. This is an optional specification, + + // KillSignal is the kill signal to use for the task. 
This is an optional + // specification and defaults to SIGINT + KillSignal string +} + +func (t *Task) Copy() *Task { + if t == nil { + return nil + } + nt := new(Task) + *nt = *t + nt.Env = helper.CopyMapStringString(nt.Env) + + if t.Services != nil { + services := make([]*Service, len(nt.Services)) + for i, s := range nt.Services { + services[i] = s.Copy() + } + nt.Services = services + } + + nt.Constraints = CopySliceConstraints(nt.Constraints) + nt.Affinities = CopySliceAffinities(nt.Affinities) + + nt.Vault = nt.Vault.Copy() + nt.Resources = nt.Resources.Copy() + nt.Meta = helper.CopyMapStringString(nt.Meta) + nt.DispatchPayload = nt.DispatchPayload.Copy() + + if t.Artifacts != nil { + artifacts := make([]*TaskArtifact, 0, len(t.Artifacts)) + for _, a := range nt.Artifacts { + artifacts = append(artifacts, a.Copy()) + } + nt.Artifacts = artifacts + } + + if i, err := copystructure.Copy(nt.Config); err != nil { + panic(err.Error()) + } else { + nt.Config = i.(map[string]interface{}) + } + + if t.Templates != nil { + templates := make([]*Template, len(t.Templates)) + for i, tmpl := range nt.Templates { + templates[i] = tmpl.Copy() + } + nt.Templates = templates + } + + return nt +} + +// Canonicalize canonicalizes fields in the task. +func (t *Task) Canonicalize(job *Job, tg *TaskGroup) { + // Ensure that an empty and nil map are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(t.Meta) == 0 { + t.Meta = nil + } + if len(t.Config) == 0 { + t.Config = nil + } + if len(t.Env) == 0 { + t.Env = nil + } + + for _, service := range t.Services { + service.Canonicalize(job.Name, tg.Name, t.Name) + } + + // If Resources are nil initialize them to defaults, otherwise canonicalize + if t.Resources == nil { + t.Resources = DefaultResources() + } else { + t.Resources.Canonicalize() + } + + // Set the default timeout if it is not specified. + if t.KillTimeout == 0 { + t.KillTimeout = DefaultKillTimeout + } + + if t.Vault != nil { + t.Vault.Canonicalize() + } + + for _, template := range t.Templates { + template.Canonicalize() + } +} + +func (t *Task) GoString() string { + return fmt.Sprintf("*%#v", *t) +} + +// Validate is used to sanity check a task +func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string) error { + var mErr multierror.Error + if t.Name == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing task name")) + } + if strings.ContainsAny(t.Name, `/\`) { + // We enforce this so that when creating the directory on disk it will + // not have any slashes. + mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes")) + } + if t.Driver == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing task driver")) + } + if t.KillTimeout < 0 { + mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value")) + } + if t.ShutdownDelay < 0 { + mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value")) + } + + // Validate the resources. 
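+ // Worked example of the log-vs-disk check further below, using the
+ // defaults defined in this file (illustrative, not in the original
+ // source): DefaultLogConfig reserves MaxFiles(10) * MaxFileSizeMB(10) =
+ // 100 MB, which passes against DefaultEphemeralDisk's 300 MB because
+ // 100 < 300. Raising MaxFileSizeMB to 30 would fail, since 10 * 30 = 300
+ // is not strictly less than the requested disk size.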
+ if t.Resources == nil { + mErr.Errors = append(mErr.Errors, errors.New("Missing task resources")) + } else if err := t.Resources.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + // Validate the log config + if t.LogConfig == nil { + mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config")) + } else if err := t.LogConfig.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + for idx, constr := range t.Constraints { + if err := constr.Validate(); err != nil { + outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + + switch constr.Operand { + case ConstraintDistinctHosts, ConstraintDistinctProperty: + outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand) + mErr.Errors = append(mErr.Errors, outer) + } + } + + if jobType == JobTypeSystem { + if t.Affinities != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity stanza")) + } + } else { + for idx, affinity := range t.Affinities { + if err := affinity.Validate(); err != nil { + outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + } + + // Validate Services + if err := validateServices(t); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + if t.LogConfig != nil && ephemeralDisk != nil { + logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB) + if ephemeralDisk.SizeMB <= logUsage { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)", + logUsage, ephemeralDisk.SizeMB)) + } + } + + for idx, artifact := range t.Artifacts { + if err := artifact.Validate(); err != nil { + outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + if t.Vault != nil { + if err := t.Vault.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err)) + } + } + + destinations := make(map[string]int, len(t.Templates)) + for idx, tmpl := range t.Templates { + if err := tmpl.Validate(); err != nil { + outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + + if other, ok := destinations[tmpl.DestPath]; ok { + outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other) + mErr.Errors = append(mErr.Errors, outer) + } else { + destinations[tmpl.DestPath] = idx + 1 + } + } + + // Validate the dispatch payload block if there + if t.DispatchPayload != nil { + if err := t.DispatchPayload.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err)) + } + } + + return mErr.ErrorOrNil() +} + +// validateServices takes a task and validates the services within it are valid +// and reference ports that exist. +func validateServices(t *Task) error { + var mErr multierror.Error + + // Ensure that services don't ask for nonexistent ports and their names are + // unique. 
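+ // Shape of the bookkeeping built below (service and label names are
+ // illustrative, not from the original source): servicePorts maps a port
+ // label to the set of services referencing it, e.g.
+ //
+ //	servicePorts["http"] = map[string]struct{}{"web": {}, "metrics": {}}
+ //
+ // so a missing label can later be reported once, listing every service
+ // that used it.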
+ servicePorts := make(map[string]map[string]struct{}) + addServicePort := func(label, service string) { + if _, ok := servicePorts[label]; !ok { + servicePorts[label] = map[string]struct{}{} + } + servicePorts[label][service] = struct{}{} + } + knownServices := make(map[string]struct{}) + for i, service := range t.Services { + if err := service.Validate(); err != nil { + outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + + // Ensure that services with the same name are not being registered for + // the same port + if _, ok := knownServices[service.Name+service.PortLabel]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name)) + } + knownServices[service.Name+service.PortLabel] = struct{}{} + + if service.PortLabel != "" { + if service.AddressMode == "driver" { + // Numeric port labels are valid for address_mode=driver + _, err := strconv.Atoi(service.PortLabel) + if err != nil { + // Not a numeric port label, add it to list to check + addServicePort(service.PortLabel, service.Name) + } + } else { + addServicePort(service.PortLabel, service.Name) + } + } + + // Ensure that check names are unique and have valid ports + knownChecks := make(map[string]struct{}) + for _, check := range service.Checks { + if _, ok := knownChecks[check.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name)) + } + knownChecks[check.Name] = struct{}{} + + if !check.RequiresPort() { + // No need to continue validating check if it doesn't need a port + continue + } + + effectivePort := check.PortLabel + if effectivePort == "" { + // Inherits from service + effectivePort = service.PortLabel + } + + if effectivePort == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is missing a port", check.Name)) + continue + } + + isNumeric := false + portNumber, err := strconv.Atoi(effectivePort) + if err == nil { + isNumeric = true + } + + // Numeric ports are fine for address_mode = "driver" + if check.AddressMode == "driver" && isNumeric { + if portNumber <= 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q has invalid numeric port %d", check.Name, portNumber)) + } + continue + } + + if isNumeric { + mErr.Errors = append(mErr.Errors, fmt.Errorf(`check %q cannot use a numeric port %d without setting address_mode="driver"`, check.Name, portNumber)) + continue + } + + // PortLabel must exist, report errors by its parent service + addServicePort(effectivePort, service.Name) + } + } + + // Get the set of port labels. + portLabels := make(map[string]struct{}) + if t.Resources != nil { + for _, network := range t.Resources.Networks { + ports := network.PortLabels() + for portLabel := range ports { + portLabels[portLabel] = struct{}{} + } + } + } + + // Iterate over a sorted list of keys to make error listings stable + keys := make([]string, 0, len(servicePorts)) + for p := range servicePorts { + keys = append(keys, p) + } + sort.Strings(keys) + + // Ensure all ports referenced in services exist. 
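+ // Illustrative example (not from the original source): if a service or
+ // check above referenced the label "http", validation only passes when a
+ // network block in t.Resources.Networks declares a port named "http";
+ // otherwise the loop below reports the label along with every service
+ // that referenced it.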
+ for _, servicePort := range keys { + services := servicePorts[servicePort] + _, ok := portLabels[servicePort] + if !ok { + names := make([]string, 0, len(services)) + for name := range services { + names = append(names, name) + } + + // Keep order deterministic + sort.Strings(names) + joined := strings.Join(names, ", ") + err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined) + mErr.Errors = append(mErr.Errors, err) + } + } + + // Ensure address mode is valid + return mErr.ErrorOrNil() +} + +func (t *Task) Warnings() error { + var mErr multierror.Error + + // Validate the resources + if t.Resources != nil && t.Resources.IOPS != 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("IOPS has been deprecated as of Nomad 0.9.0. Please remove IOPS from resource stanza.")) + } + + return mErr.ErrorOrNil() +} + +const ( + // TemplateChangeModeNoop marks that no action should be taken if the + // template is re-rendered + TemplateChangeModeNoop = "noop" + + // TemplateChangeModeSignal marks that the task should be signaled if the + // template is re-rendered + TemplateChangeModeSignal = "signal" + + // TemplateChangeModeRestart marks that the task should be restarted if the + // template is re-rendered + TemplateChangeModeRestart = "restart" +) + +var ( + // TemplateChangeModeInvalidError is the error for when an invalid change + // mode is given + TemplateChangeModeInvalidError = errors.New("Invalid change mode. Must be one of the following: noop, signal, restart") +) + +// Template represents a template configuration to be rendered for a given task +type Template struct { + // SourcePath is the path to the template to be rendered + SourcePath string + + // DestPath is the path to where the template should be rendered + DestPath string + + // EmbeddedTmpl store the raw template. This is useful for smaller templates + // where they are embedded in the job file rather than sent as an artifact + EmbeddedTmpl string + + // ChangeMode indicates what should be done if the template is re-rendered + ChangeMode string + + // ChangeSignal is the signal that should be sent if the change mode + // requires it. + ChangeSignal string + + // Splay is used to avoid coordinated restarts of processes by applying a + // random wait between 0 and the given splay value before signalling the + // application of a change + Splay time.Duration + + // Perms is the permission the file should be written out with. + Perms string + + // LeftDelim and RightDelim are optional configurations to control what + // delimiter is utilized when parsing the template. + LeftDelim string + RightDelim string + + // Envvars enables exposing the template as environment variables + // instead of as a file. The template must be of the form: + // + // VAR_NAME_1={{ key service/my-key }} + // VAR_NAME_2=raw string and {{ env "attr.kernel.name" }} + // + // Lines will be split on the initial "=" with the first part being the + // key name and the second part the value. + // Empty lines and lines starting with # will be ignored, but to avoid + // escaping issues #s within lines will not be treated as comments. + Envvars bool + + // VaultGrace is the grace duration between lease renewal and reacquiring a + // secret. If the lease of a secret is less than the grace, a new secret is + // acquired. + VaultGrace time.Duration +} + +// DefaultTemplate returns a default template. 
+func DefaultTemplate() *Template { + return &Template{ + ChangeMode: TemplateChangeModeRestart, + Splay: 5 * time.Second, + Perms: "0644", + } +} + +func (t *Template) Copy() *Template { + if t == nil { + return nil + } + copy := new(Template) + *copy = *t + return copy +} + +func (t *Template) Canonicalize() { + if t.ChangeSignal != "" { + t.ChangeSignal = strings.ToUpper(t.ChangeSignal) + } +} + +func (t *Template) Validate() error { + var mErr multierror.Error + + // Verify we have something to render + if t.SourcePath == "" && t.EmbeddedTmpl == "" { + multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template")) + } + + // Verify we can render somewhere + if t.DestPath == "" { + multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template")) + } + + // Verify the destination doesn't escape + escaped, err := PathEscapesAllocDir("task", t.DestPath) + if err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err)) + } else if escaped { + mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory")) + } + + // Verify a proper change mode + switch t.ChangeMode { + case TemplateChangeModeNoop, TemplateChangeModeRestart: + case TemplateChangeModeSignal: + if t.ChangeSignal == "" { + multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal")) + } + if t.Envvars { + multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates")) + } + default: + multierror.Append(&mErr, TemplateChangeModeInvalidError) + } + + // Verify the splay is positive + if t.Splay < 0 { + multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value")) + } + + // Verify the permissions + if t.Perms != "" { + if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil { + multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err)) + } + } + + if t.VaultGrace.Nanoseconds() < 0 { + multierror.Append(&mErr, fmt.Errorf("Vault grace must be greater than zero: %v < 0", t.VaultGrace)) + } + + return mErr.ErrorOrNil() +} + +// Set of possible states for a task. +const ( + TaskStatePending = "pending" // The task is waiting to be run. + TaskStateRunning = "running" // The task is currently running. + TaskStateDead = "dead" // Terminal state of task. +) + +// TaskState tracks the current state of a task and events that caused state +// transitions. +type TaskState struct { + // The current state of the task. + State string + + // Failed marks a task as having failed + Failed bool + + // Restarts is the number of times the task has restarted + Restarts uint64 + + // LastRestart is the time the task last restarted. It is updated each time the + // task restarts + LastRestart time.Time + + // StartedAt is the time the task is started. It is updated each time the + // task starts + StartedAt time.Time + + // FinishedAt is the time at which the task transitioned to dead and will + // not be started again. + FinishedAt time.Time + + // Series of task events that transition the state of the task. + Events []*TaskEvent +} + +// NewTaskState returns a TaskState initialized in the Pending state. +func NewTaskState() *TaskState { + return &TaskState{ + State: TaskStatePending, + } +} + +// Canonicalize ensures the TaskState has a State set. It should default to +// Pending. 
+func (ts *TaskState) Canonicalize() { + if ts.State == "" { + ts.State = TaskStatePending + } +} + +func (ts *TaskState) Copy() *TaskState { + if ts == nil { + return nil + } + copy := new(TaskState) + *copy = *ts + + if ts.Events != nil { + copy.Events = make([]*TaskEvent, len(ts.Events)) + for i, e := range ts.Events { + copy.Events[i] = e.Copy() + } + } + return copy +} + +// Successful returns whether a task finished successfully. This doesn't really +// have meaning on a non-batch allocation because a service and system +// allocation should not finish. +func (ts *TaskState) Successful() bool { + return ts.State == TaskStateDead && !ts.Failed +} + +const ( + // TaskSetupFailure indicates that the task could not be started due to a + // a setup failure. + TaskSetupFailure = "Setup Failure" + + // TaskDriveFailure indicates that the task could not be started due to a + // failure in the driver. + TaskDriverFailure = "Driver Failure" + + // TaskReceived signals that the task has been pulled by the client at the + // given timestamp. + TaskReceived = "Received" + + // TaskFailedValidation indicates the task was invalid and as such was not + // run. + TaskFailedValidation = "Failed Validation" + + // TaskStarted signals that the task was started and its timestamp can be + // used to determine the running length of the task. + TaskStarted = "Started" + + // TaskTerminated indicates that the task was started and exited. + TaskTerminated = "Terminated" + + // TaskKilling indicates a kill signal has been sent to the task. + TaskKilling = "Killing" + + // TaskKilled indicates a user has killed the task. + TaskKilled = "Killed" + + // TaskRestarting indicates that task terminated and is being restarted. + TaskRestarting = "Restarting" + + // TaskNotRestarting indicates that the task has failed and is not being + // restarted because it has exceeded its restart policy. + TaskNotRestarting = "Not Restarting" + + // TaskRestartSignal indicates that the task has been signalled to be + // restarted + TaskRestartSignal = "Restart Signaled" + + // TaskSignaling indicates that the task is being signalled. + TaskSignaling = "Signaling" + + // TaskDownloadingArtifacts means the task is downloading the artifacts + // specified in the task. + TaskDownloadingArtifacts = "Downloading Artifacts" + + // TaskArtifactDownloadFailed indicates that downloading the artifacts + // failed. + TaskArtifactDownloadFailed = "Failed Artifact Download" + + // TaskBuildingTaskDir indicates that the task directory/chroot is being + // built. + TaskBuildingTaskDir = "Building Task Directory" + + // TaskSetup indicates the task runner is setting up the task environment + TaskSetup = "Task Setup" + + // TaskDiskExceeded indicates that one of the tasks in a taskgroup has + // exceeded the requested disk resources. + TaskDiskExceeded = "Disk Resources Exceeded" + + // TaskSiblingFailed indicates that a sibling task in the task group has + // failed. + TaskSiblingFailed = "Sibling Task Failed" + + // TaskDriverMessage is an informational event message emitted by + // drivers such as when they're performing a long running action like + // downloading an image. + TaskDriverMessage = "Driver" + + // TaskLeaderDead indicates that the leader task within the has finished. + TaskLeaderDead = "Leader Task Dead" + + // TaskHookFailed indicates that one of the hooks for a task failed. 
+ TaskHookFailed = "Task hook failed" +) + +// TaskEvent is an event that effects the state of a task and contains meta-data +// appropriate to the events type. +type TaskEvent struct { + Type string + Time int64 // Unix Nanosecond timestamp + + Message string // A possible message explaining the termination of the task. + + // DisplayMessage is a human friendly message about the event + DisplayMessage string + + // Details is a map with annotated info about the event + Details map[string]string + + // DEPRECATION NOTICE: The following fields are deprecated and will be removed + // in a future release. Field values are available in the Details map. + + // FailsTask marks whether this event fails the task. + // Deprecated, use Details["fails_task"] to access this. + FailsTask bool + + // Restart fields. + // Deprecated, use Details["restart_reason"] to access this. + RestartReason string + + // Setup Failure fields. + // Deprecated, use Details["setup_error"] to access this. + SetupError string + + // Driver Failure fields. + // Deprecated, use Details["driver_error"] to access this. + DriverError string // A driver error occurred while starting the task. + + // Task Terminated Fields. + + // Deprecated, use Details["exit_code"] to access this. + ExitCode int // The exit code of the task. + + // Deprecated, use Details["signal"] to access this. + Signal int // The signal that terminated the task. + + // Killing fields + // Deprecated, use Details["kill_timeout"] to access this. + KillTimeout time.Duration + + // Task Killed Fields. + // Deprecated, use Details["kill_error"] to access this. + KillError string // Error killing the task. + + // KillReason is the reason the task was killed + // Deprecated, use Details["kill_reason"] to access this. + KillReason string + + // TaskRestarting fields. + // Deprecated, use Details["start_delay"] to access this. + StartDelay int64 // The sleep period before restarting the task in unix nanoseconds. + + // Artifact Download fields + // Deprecated, use Details["download_error"] to access this. + DownloadError string // Error downloading artifacts + + // Validation fields + // Deprecated, use Details["validation_error"] to access this. + ValidationError string // Validation error + + // The maximum allowed task disk size. + // Deprecated, use Details["disk_limit"] to access this. + DiskLimit int64 + + // Name of the sibling task that caused termination of the task that + // the TaskEvent refers to. + // Deprecated, use Details["failed_sibling"] to access this. + FailedSibling string + + // VaultError is the error from token renewal + // Deprecated, use Details["vault_renewal_error"] to access this. + VaultError string + + // TaskSignalReason indicates the reason the task is being signalled. + // Deprecated, use Details["task_signal_reason"] to access this. + TaskSignalReason string + + // TaskSignal is the signal that was sent to the task + // Deprecated, use Details["task_signal"] to access this. + TaskSignal string + + // DriverMessage indicates a driver action being taken. + // Deprecated, use Details["driver_message"] to access this. + DriverMessage string + + // GenericSource is the source of a message. + // Deprecated, is redundant with event type. + GenericSource string +} + +func (event *TaskEvent) PopulateEventDisplayMessage() { + // Build up the description based on the event type. + if event == nil { //TODO(preetha) needs investigation alloc_runner's Run method sends a nil event when sigterming nomad. Why? 
+ return + } + + if event.DisplayMessage != "" { + return + } + + var desc string + switch event.Type { + case TaskSetup: + desc = event.Message + case TaskStarted: + desc = "Task started by client" + case TaskReceived: + desc = "Task received by client" + case TaskFailedValidation: + if event.ValidationError != "" { + desc = event.ValidationError + } else { + desc = "Validation of task failed" + } + case TaskSetupFailure: + if event.SetupError != "" { + desc = event.SetupError + } else { + desc = "Task setup failed" + } + case TaskDriverFailure: + if event.DriverError != "" { + desc = event.DriverError + } else { + desc = "Failed to start task" + } + case TaskDownloadingArtifacts: + desc = "Client is downloading artifacts" + case TaskArtifactDownloadFailed: + if event.DownloadError != "" { + desc = event.DownloadError + } else { + desc = "Failed to download artifacts" + } + case TaskKilling: + if event.KillReason != "" { + desc = event.KillReason + } else if event.KillTimeout != 0 { + desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", event.KillTimeout) + } else { + desc = "Sent interrupt" + } + case TaskKilled: + if event.KillError != "" { + desc = event.KillError + } else { + desc = "Task successfully killed" + } + case TaskTerminated: + var parts []string + parts = append(parts, fmt.Sprintf("Exit Code: %d", event.ExitCode)) + + if event.Signal != 0 { + parts = append(parts, fmt.Sprintf("Signal: %d", event.Signal)) + } + + if event.Message != "" { + parts = append(parts, fmt.Sprintf("Exit Message: %q", event.Message)) + } + desc = strings.Join(parts, ", ") + case TaskRestarting: + in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay)) + if event.RestartReason != "" && event.RestartReason != ReasonWithinPolicy { + desc = fmt.Sprintf("%s - %s", event.RestartReason, in) + } else { + desc = in + } + case TaskNotRestarting: + if event.RestartReason != "" { + desc = event.RestartReason + } else { + desc = "Task exceeded restart policy" + } + case TaskSiblingFailed: + if event.FailedSibling != "" { + desc = fmt.Sprintf("Task's sibling %q failed", event.FailedSibling) + } else { + desc = "Task's sibling failed" + } + case TaskSignaling: + sig := event.TaskSignal + reason := event.TaskSignalReason + + if sig == "" && reason == "" { + desc = "Task being sent a signal" + } else if sig == "" { + desc = reason + } else if reason == "" { + desc = fmt.Sprintf("Task being sent signal %v", sig) + } else { + desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason) + } + case TaskRestartSignal: + if event.RestartReason != "" { + desc = event.RestartReason + } else { + desc = "Task signaled to restart" + } + case TaskDriverMessage: + desc = event.DriverMessage + case TaskLeaderDead: + desc = "Leader Task in Group dead" + default: + desc = event.Message + } + + event.DisplayMessage = desc +} + +func (te *TaskEvent) GoString() string { + return fmt.Sprintf("%v - %v", te.Time, te.Type) +} + +// SetDisplayMessage sets the display message of TaskEvent +func (te *TaskEvent) SetDisplayMessage(msg string) *TaskEvent { + te.DisplayMessage = msg + return te +} + +// SetMessage sets the message of TaskEvent +func (te *TaskEvent) SetMessage(msg string) *TaskEvent { + te.Message = msg + te.Details["message"] = msg + return te +} + +func (te *TaskEvent) Copy() *TaskEvent { + if te == nil { + return nil + } + copy := new(TaskEvent) + *copy = *te + return copy +} + +func NewTaskEvent(event string) *TaskEvent { + return &TaskEvent{ + Type: event, + Time: 
time.Now().UnixNano(), + Details: make(map[string]string), + } +} + +// SetSetupError is used to store an error that occurred while setting up the +// task +func (e *TaskEvent) SetSetupError(err error) *TaskEvent { + if err != nil { + e.SetupError = err.Error() + e.Details["setup_error"] = err.Error() + } + return e +} + +func (e *TaskEvent) SetFailsTask() *TaskEvent { + e.FailsTask = true + e.Details["fails_task"] = "true" + return e +} + +func (e *TaskEvent) SetDriverError(err error) *TaskEvent { + if err != nil { + e.DriverError = err.Error() + e.Details["driver_error"] = err.Error() + } + return e +} + +func (e *TaskEvent) SetExitCode(c int) *TaskEvent { + e.ExitCode = c + e.Details["exit_code"] = fmt.Sprintf("%d", c) + return e +} + +func (e *TaskEvent) SetSignal(s int) *TaskEvent { + e.Signal = s + e.Details["signal"] = fmt.Sprintf("%d", s) + return e +} + +func (e *TaskEvent) SetExitMessage(err error) *TaskEvent { + if err != nil { + e.Message = err.Error() + e.Details["exit_message"] = err.Error() + } + return e +} + +func (e *TaskEvent) SetKillError(err error) *TaskEvent { + if err != nil { + e.KillError = err.Error() + e.Details["kill_error"] = err.Error() + } + return e +} + +func (e *TaskEvent) SetKillReason(r string) *TaskEvent { + e.KillReason = r + e.Details["kill_reason"] = r + return e +} + +func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent { + e.StartDelay = int64(delay) + e.Details["start_delay"] = fmt.Sprintf("%d", delay) + return e +} + +func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent { + e.RestartReason = reason + e.Details["restart_reason"] = reason + return e +} + +func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent { + e.TaskSignalReason = r + e.Details["task_signal_reason"] = r + return e +} + +func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent { + e.TaskSignal = s.String() + e.Details["task_signal"] = s.String() + return e +} + +func (e *TaskEvent) SetDownloadError(err error) *TaskEvent { + if err != nil { + e.DownloadError = err.Error() + e.Details["download_error"] = err.Error() + } + return e +} + +func (e *TaskEvent) SetValidationError(err error) *TaskEvent { + if err != nil { + e.ValidationError = err.Error() + e.Details["validation_error"] = err.Error() + } + return e +} + +func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent { + e.KillTimeout = timeout + e.Details["kill_timeout"] = timeout.String() + return e +} + +func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent { + e.DiskLimit = limit + e.Details["disk_limit"] = fmt.Sprintf("%d", limit) + return e +} + +func (e *TaskEvent) SetFailedSibling(sibling string) *TaskEvent { + e.FailedSibling = sibling + e.Details["failed_sibling"] = sibling + return e +} + +func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent { + if err != nil { + e.VaultError = err.Error() + e.Details["vault_renewal_error"] = err.Error() + } + return e +} + +func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent { + e.DriverMessage = m + e.Details["driver_message"] = m + return e +} + +func (e *TaskEvent) SetOOMKilled(oom bool) *TaskEvent { + e.Details["oom_killed"] = strconv.FormatBool(oom) + return e +} + +// TaskArtifact is an artifact to download before running the task. +type TaskArtifact struct { + // GetterSource is the source to download an artifact using go-getter + GetterSource string + + // GetterOptions are options to use when downloading the artifact using + // go-getter. 
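+ // An illustrative value (only the "checksum" key is interpreted by the
+ // validation further below; the digest is a placeholder):
+ //
+ //	GetterOptions: map[string]string{"checksum": "sha256:<hex digest>"}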
+ GetterOptions map[string]string + + // GetterMode is the go-getter.ClientMode for fetching resources. + // Defaults to "any" but can be set to "file" or "dir". + GetterMode string + + // RelativeDest is the download destination given relative to the task's + // directory. + RelativeDest string +} + +func (ta *TaskArtifact) Copy() *TaskArtifact { + if ta == nil { + return nil + } + nta := new(TaskArtifact) + *nta = *ta + nta.GetterOptions = helper.CopyMapStringString(ta.GetterOptions) + return nta +} + +func (ta *TaskArtifact) GoString() string { + return fmt.Sprintf("%+v", ta) +} + +// Hash creates a unique identifier for a TaskArtifact as the same GetterSource +// may be specified multiple times with different destinations. +func (ta *TaskArtifact) Hash() string { + hash, err := blake2b.New256(nil) + if err != nil { + panic(err) + } + + hash.Write([]byte(ta.GetterSource)) + + // Must iterate over keys in a consistent order + keys := make([]string, 0, len(ta.GetterOptions)) + for k := range ta.GetterOptions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + hash.Write([]byte(k)) + hash.Write([]byte(ta.GetterOptions[k])) + } + + hash.Write([]byte(ta.GetterMode)) + hash.Write([]byte(ta.RelativeDest)) + return base64.RawStdEncoding.EncodeToString(hash.Sum(nil)) +} + +// PathEscapesAllocDir returns if the given path escapes the allocation +// directory. The prefix allows adding a prefix if the path will be joined, for +// example a "task/local" prefix may be provided if the path will be joined +// against that prefix. +func PathEscapesAllocDir(prefix, path string) (bool, error) { + // Verify the destination doesn't escape the tasks directory + alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/")) + if err != nil { + return false, err + } + abs, err := filepath.Abs(filepath.Join(alloc, prefix, path)) + if err != nil { + return false, err + } + rel, err := filepath.Rel(alloc, abs) + if err != nil { + return false, err + } + + return strings.HasPrefix(rel, ".."), nil +} + +func (ta *TaskArtifact) Validate() error { + // Verify the source + var mErr multierror.Error + if ta.GetterSource == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified")) + } + + switch ta.GetterMode { + case "": + // Default to any + ta.GetterMode = GetterModeAny + case GetterModeAny, GetterModeFile, GetterModeDir: + // Ok + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s", + ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir)) + } + + escaped, err := PathEscapesAllocDir("task", ta.RelativeDest) + if err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err)) + } else if escaped { + mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory")) + } + + if err := ta.validateChecksum(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + return mErr.ErrorOrNil() +} + +func (ta *TaskArtifact) validateChecksum() error { + check, ok := ta.GetterOptions["checksum"] + if !ok { + return nil + } + + // Job struct validation occurs before interpolation resolution can be effective. + // Skip checking if checksum contain variable reference, and artifacts fetching will + // eventually fail, if checksum is indeed invalid. 
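+ // Illustrative inputs (not from the original source): a literal value
+ // such as "sha256:<64 hex characters>" is checked for shape below, while
+ // a value containing interpolation (e.g. "${...}" syntax) is skipped here
+ // and left for the artifact fetch to reject later if it is invalid.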
+ if args.ContainsEnv(check) { + return nil + } + + check = strings.TrimSpace(check) + if check == "" { + return fmt.Errorf("checksum value cannot be empty") + } + + parts := strings.Split(check, ":") + if l := len(parts); l != 2 { + return fmt.Errorf(`checksum must be given as "type:value"; got %q`, check) + } + + checksumVal := parts[1] + checksumBytes, err := hex.DecodeString(checksumVal) + if err != nil { + return fmt.Errorf("invalid checksum: %v", err) + } + + checksumType := parts[0] + expectedLength := 0 + switch checksumType { + case "md5": + expectedLength = md5.Size + case "sha1": + expectedLength = sha1.Size + case "sha256": + expectedLength = sha256.Size + case "sha512": + expectedLength = sha512.Size + default: + return fmt.Errorf("unsupported checksum type: %s", checksumType) + } + + if len(checksumBytes) != expectedLength { + return fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal) + } + + return nil +} + +const ( + ConstraintDistinctProperty = "distinct_property" + ConstraintDistinctHosts = "distinct_hosts" + ConstraintRegex = "regexp" + ConstraintVersion = "version" + ConstraintSetContains = "set_contains" + ConstraintSetContainsAll = "set_contains_all" + ConstraintSetContainsAny = "set_contains_any" + ConstraintAttributeIsSet = "is_set" + ConstraintAttributeIsNotSet = "is_not_set" +) + +// Constraints are used to restrict placement options. +type Constraint struct { + LTarget string // Left-hand target + RTarget string // Right-hand target + Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near + str string // Memoized string +} + +// Equal checks if two constraints are equal +func (c *Constraint) Equal(o *Constraint) bool { + return c.LTarget == o.LTarget && + c.RTarget == o.RTarget && + c.Operand == o.Operand +} + +func (c *Constraint) Copy() *Constraint { + if c == nil { + return nil + } + nc := new(Constraint) + *nc = *c + return nc +} + +func (c *Constraint) String() string { + if c.str != "" { + return c.str + } + c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget) + return c.str +} + +func (c *Constraint) Validate() error { + var mErr multierror.Error + if c.Operand == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand")) + } + + // requireLtarget specifies whether the constraint requires an LTarget to be + // provided. 
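+ // Illustrative constraints that pass these rules (the attribute and
+ // values are examples, not from the original source):
+ //
+ //	&Constraint{LTarget: "${attr.kernel.name}", Operand: "=", RTarget: "linux"}
+ //	&Constraint{Operand: ConstraintDistinctHosts} // needs no LTarget
+ //
+ // The switch below decides, per operand, whether LTarget and RTarget are
+ // required.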
+ requireLtarget := true + + // Perform additional validation based on operand + switch c.Operand { + case ConstraintDistinctHosts: + requireLtarget = false + case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains: + if c.RTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget")) + } + case ConstraintRegex: + if _, err := regexp.Compile(c.RTarget); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err)) + } + case ConstraintVersion: + if _, err := version.NewConstraint(c.RTarget); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err)) + } + case ConstraintDistinctProperty: + // If a count is set, make sure it is convertible to a uint64 + if c.RTarget != "" { + count, err := strconv.ParseUint(c.RTarget, 10, 64) + if err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err)) + } else if count < 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count)) + } + } + case ConstraintAttributeIsSet, ConstraintAttributeIsNotSet: + if c.RTarget != "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q does not support an RTarget", c.Operand)) + } + case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=": + if c.RTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand)) + } + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand)) + } + + // Ensure we have an LTarget for the constraints that need one + if requireLtarget && c.LTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint")) + } + + return mErr.ErrorOrNil() +} + +// Affinity is used to score placement options based on a weight +type Affinity struct { + LTarget string // Left-hand target + RTarget string // Right-hand target + Operand string // Affinity operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any + Weight int8 // Weight applied to nodes that match the affinity. 
Can be negative + str string // Memoized string +} + +// Equal checks if two affinities are equal +func (a *Affinity) Equal(o *Affinity) bool { + return a.LTarget == o.LTarget && + a.RTarget == o.RTarget && + a.Operand == o.Operand && + a.Weight == o.Weight +} + +func (a *Affinity) Copy() *Affinity { + if a == nil { + return nil + } + na := new(Affinity) + *na = *a + return na +} + +func (a *Affinity) String() string { + if a.str != "" { + return a.str + } + a.str = fmt.Sprintf("%s %s %s %v", a.LTarget, a.Operand, a.RTarget, a.Weight) + return a.str +} + +func (a *Affinity) Validate() error { + var mErr multierror.Error + if a.Operand == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing affinity operand")) + } + + // Perform additional validation based on operand + switch a.Operand { + case ConstraintSetContainsAll, ConstraintSetContainsAny, ConstraintSetContains: + if a.RTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains operators require an RTarget")) + } + case ConstraintRegex: + if _, err := regexp.Compile(a.RTarget); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err)) + } + case ConstraintVersion: + if _, err := version.NewConstraint(a.RTarget); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Version affinity is invalid: %v", err)) + } + case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=": + if a.RTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", a.Operand)) + } + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown affinity operator %q", a.Operand)) + } + + // Ensure we have an LTarget + if a.LTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required")) + } + + // Ensure that weight is between -100 and 100, and not zero + if a.Weight == 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight cannot be zero")) + } + + if a.Weight > 100 || a.Weight < -100 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Affinity weight must be within the range [-100,100]")) + } + + return mErr.ErrorOrNil() +} + +// Spread is used to specify desired distribution of allocations according to weight +type Spread struct { + // Attribute is the node attribute used as the spread criteria + Attribute string + + // Weight is the relative weight of this spread, useful when there are multiple + // spread and affinities + Weight int8 + + // SpreadTarget is used to describe desired percentages for each attribute value + SpreadTarget []*SpreadTarget + + // Memoized string representation + str string +} + +func (s *Spread) Copy() *Spread { + if s == nil { + return nil + } + ns := new(Spread) + *ns = *s + + ns.SpreadTarget = CopySliceSpreadTarget(s.SpreadTarget) + return ns +} + +func (s *Spread) String() string { + if s.str != "" { + return s.str + } + s.str = fmt.Sprintf("%s %s %v", s.Attribute, s.SpreadTarget, s.Weight) + return s.str +} + +func (s *Spread) Validate() error { + var mErr multierror.Error + if s.Attribute == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing spread attribute")) + } + if s.Weight <= 0 || s.Weight > 100 { + mErr.Errors = append(mErr.Errors, errors.New("Spread stanza must have a positive weight from 0 to 100")) + } + seen := make(map[string]struct{}) + sumPercent := uint32(0) + + for _, target := range s.SpreadTarget { + // Make sure there are no duplicates + _, ok := seen[target.Value] + if !ok { + seen[target.Value] = struct{}{} + } else { + mErr.Errors = 
append(mErr.Errors, errors.New(fmt.Sprintf("Spread target value %q already defined", target.Value))) + } + if target.Percent < 0 || target.Percent > 100 { + mErr.Errors = append(mErr.Errors, errors.New(fmt.Sprintf("Spread target percentage for value %q must be between 0 and 100", target.Value))) + } + sumPercent += uint32(target.Percent) + } + if sumPercent > 100 { + mErr.Errors = append(mErr.Errors, errors.New(fmt.Sprintf("Sum of spread target percentages must not be greater than 100%%; got %d%%", sumPercent))) + } + return mErr.ErrorOrNil() +} + +// SpreadTarget is used to specify desired percentages for each attribute value +type SpreadTarget struct { + // Value is a single attribute value, like "dc1" + Value string + + // Percent is the desired percentage of allocs + Percent uint8 + + // Memoized string representation + str string +} + +func (s *SpreadTarget) Copy() *SpreadTarget { + if s == nil { + return nil + } + + ns := new(SpreadTarget) + *ns = *s + return ns +} + +func (s *SpreadTarget) String() string { + if s.str != "" { + return s.str + } + s.str = fmt.Sprintf("%q %v%%", s.Value, s.Percent) + return s.str +} + +// EphemeralDisk is an ephemeral disk object +type EphemeralDisk struct { + // Sticky indicates whether the allocation is sticky to a node + Sticky bool + + // SizeMB is the size of the local disk + SizeMB int + + // Migrate determines if Nomad client should migrate the allocation dir for + // sticky allocations + Migrate bool +} + +// DefaultEphemeralDisk returns a EphemeralDisk with default configurations +func DefaultEphemeralDisk() *EphemeralDisk { + return &EphemeralDisk{ + SizeMB: 300, + } +} + +// Validate validates EphemeralDisk +func (d *EphemeralDisk) Validate() error { + if d.SizeMB < 10 { + return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB) + } + return nil +} + +// Copy copies the EphemeralDisk struct and returns a new one +func (d *EphemeralDisk) Copy() *EphemeralDisk { + ld := new(EphemeralDisk) + *ld = *d + return ld +} + +var ( + // VaultUnrecoverableError matches unrecoverable errors returned by a Vault + // server + VaultUnrecoverableError = regexp.MustCompile(`Code:\s+40(0|3|4)`) +) + +const ( + // VaultChangeModeNoop takes no action when a new token is retrieved. + VaultChangeModeNoop = "noop" + + // VaultChangeModeSignal signals the task when a new token is retrieved. + VaultChangeModeSignal = "signal" + + // VaultChangeModeRestart restarts the task when a new token is retrieved. + VaultChangeModeRestart = "restart" +) + +// Vault stores the set of permissions a task needs access to from Vault. +type Vault struct { + // Policies is the set of policies that the task needs access to + Policies []string + + // Env marks whether the Vault Token should be exposed as an environment + // variable + Env bool + + // ChangeMode is used to configure the task's behavior when the Vault + // token changes because the original token could not be renewed in time. + ChangeMode string + + // ChangeSignal is the signal sent to the task when a new token is + // retrieved. This is only valid when using the signal change mode. + ChangeSignal string +} + +func DefaultVaultBlock() *Vault { + return &Vault{ + Env: true, + ChangeMode: VaultChangeModeRestart, + } +} + +// Copy returns a copy of this Vault block. 
+func (v *Vault) Copy() *Vault { + if v == nil { + return nil + } + + nv := new(Vault) + *nv = *v + return nv +} + +func (v *Vault) Canonicalize() { + if v.ChangeSignal != "" { + v.ChangeSignal = strings.ToUpper(v.ChangeSignal) + } +} + +// Validate returns if the Vault block is valid. +func (v *Vault) Validate() error { + if v == nil { + return nil + } + + var mErr multierror.Error + if len(v.Policies) == 0 { + multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty")) + } + + for _, p := range v.Policies { + if p == "root" { + multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy")) + } + } + + switch v.ChangeMode { + case VaultChangeModeSignal: + if v.ChangeSignal == "" { + multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal)) + } + case VaultChangeModeNoop, VaultChangeModeRestart: + default: + multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode)) + } + + return mErr.ErrorOrNil() +} + +const ( + // DeploymentStatuses are the various states a deployment can be be in + DeploymentStatusRunning = "running" + DeploymentStatusPaused = "paused" + DeploymentStatusFailed = "failed" + DeploymentStatusSuccessful = "successful" + DeploymentStatusCancelled = "cancelled" + + // DeploymentStatusDescriptions are the various descriptions of the states a + // deployment can be in. + DeploymentStatusDescriptionRunning = "Deployment is running" + DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion" + DeploymentStatusDescriptionPaused = "Deployment is paused" + DeploymentStatusDescriptionSuccessful = "Deployment completed successfully" + DeploymentStatusDescriptionStoppedJob = "Cancelled because job is stopped" + DeploymentStatusDescriptionNewerJob = "Cancelled due to newer version of job" + DeploymentStatusDescriptionFailedAllocations = "Failed due to unhealthy allocations" + DeploymentStatusDescriptionProgressDeadline = "Failed due to progress deadline" + DeploymentStatusDescriptionFailedByUser = "Deployment marked as failed" +) + +// DeploymentStatusDescriptionRollback is used to get the status description of +// a deployment when rolling back to an older job. +func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string { + return fmt.Sprintf("%s - rolling back to job version %d", baseDescription, jobVersion) +} + +// DeploymentStatusDescriptionRollbackNoop is used to get the status description of +// a deployment when rolling back is not possible because it has the same specification +func DeploymentStatusDescriptionRollbackNoop(baseDescription string, jobVersion uint64) string { + return fmt.Sprintf("%s - not rolling back to stable job version %d as current job has same specification", baseDescription, jobVersion) +} + +// DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of +// a deployment when there is no target to rollback to but autorevert is desired. +func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string { + return fmt.Sprintf("%s - no stable job version to auto revert to", baseDescription) +} + +// Deployment is the object that represents a job deployment which is used to +// transition a job between versions. 
+type Deployment struct { + // ID is a generated UUID for the deployment + ID string + + // Namespace is the namespace the deployment is created in + Namespace string + + // JobID is the job the deployment is created for + JobID string + + // JobVersion is the version of the job at which the deployment is tracking + JobVersion uint64 + + // JobModifyIndex is the ModifyIndex of the job which the deployment is + // tracking. + JobModifyIndex uint64 + + // JobSpecModifyIndex is the JobModifyIndex of the job which the + // deployment is tracking. + JobSpecModifyIndex uint64 + + // JobCreateIndex is the create index of the job which the deployment is + // tracking. It is needed so that if the job gets stopped and reran we can + // present the correct list of deployments for the job and not old ones. + JobCreateIndex uint64 + + // TaskGroups is the set of task groups effected by the deployment and their + // current deployment status. + TaskGroups map[string]*DeploymentState + + // The status of the deployment + Status string + + // StatusDescription allows a human readable description of the deployment + // status. + StatusDescription string + + CreateIndex uint64 + ModifyIndex uint64 +} + +// NewDeployment creates a new deployment given the job. +func NewDeployment(job *Job) *Deployment { + return &Deployment{ + ID: uuid.Generate(), + Namespace: job.Namespace, + JobID: job.ID, + JobVersion: job.Version, + JobModifyIndex: job.ModifyIndex, + JobSpecModifyIndex: job.JobModifyIndex, + JobCreateIndex: job.CreateIndex, + Status: DeploymentStatusRunning, + StatusDescription: DeploymentStatusDescriptionRunning, + TaskGroups: make(map[string]*DeploymentState, len(job.TaskGroups)), + } +} + +func (d *Deployment) Copy() *Deployment { + if d == nil { + return nil + } + + c := &Deployment{} + *c = *d + + c.TaskGroups = nil + if l := len(d.TaskGroups); d.TaskGroups != nil { + c.TaskGroups = make(map[string]*DeploymentState, l) + for tg, s := range d.TaskGroups { + c.TaskGroups[tg] = s.Copy() + } + } + + return c +} + +// Active returns whether the deployment is active or terminal. +func (d *Deployment) Active() bool { + switch d.Status { + case DeploymentStatusRunning, DeploymentStatusPaused: + return true + default: + return false + } +} + +// GetID is a helper for getting the ID when the object may be nil +func (d *Deployment) GetID() string { + if d == nil { + return "" + } + return d.ID +} + +// HasPlacedCanaries returns whether the deployment has placed canaries +func (d *Deployment) HasPlacedCanaries() bool { + if d == nil || len(d.TaskGroups) == 0 { + return false + } + for _, group := range d.TaskGroups { + if len(group.PlacedCanaries) != 0 { + return true + } + } + return false +} + +// RequiresPromotion returns whether the deployment requires promotion to +// continue +func (d *Deployment) RequiresPromotion() bool { + if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning { + return false + } + for _, group := range d.TaskGroups { + if group.DesiredCanaries > 0 && !group.Promoted { + return true + } + } + return false +} + +func (d *Deployment) GoString() string { + base := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription) + for group, state := range d.TaskGroups { + base += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state) + } + return base +} + +// DeploymentState tracks the state of a deployment for a given task group. 
+type DeploymentState struct { + // AutoRevert marks whether the task group has indicated the job should be + // reverted on failure + AutoRevert bool + + // ProgressDeadline is the deadline by which an allocation must transition + // to healthy before the deployment is considered failed. + ProgressDeadline time.Duration + + // RequireProgressBy is the time by which an allocation must transition + // to healthy before the deployment is considered failed. + RequireProgressBy time.Time + + // Promoted marks whether the canaries have been promoted + Promoted bool + + // PlacedCanaries is the set of placed canary allocations + PlacedCanaries []string + + // DesiredCanaries is the number of canaries that should be created. + DesiredCanaries int + + // DesiredTotal is the total number of allocations that should be created as + // part of the deployment. + DesiredTotal int + + // PlacedAllocs is the number of allocations that have been placed + PlacedAllocs int + + // HealthyAllocs is the number of allocations that have been marked healthy. + HealthyAllocs int + + // UnhealthyAllocs are allocations that have been marked as unhealthy. + UnhealthyAllocs int +} + +func (d *DeploymentState) GoString() string { + base := fmt.Sprintf("\tDesired Total: %d", d.DesiredTotal) + base += fmt.Sprintf("\n\tDesired Canaries: %d", d.DesiredCanaries) + base += fmt.Sprintf("\n\tPlaced Canaries: %#v", d.PlacedCanaries) + base += fmt.Sprintf("\n\tPromoted: %v", d.Promoted) + base += fmt.Sprintf("\n\tPlaced: %d", d.PlacedAllocs) + base += fmt.Sprintf("\n\tHealthy: %d", d.HealthyAllocs) + base += fmt.Sprintf("\n\tUnhealthy: %d", d.UnhealthyAllocs) + base += fmt.Sprintf("\n\tAutoRevert: %v", d.AutoRevert) + return base +} + +func (d *DeploymentState) Copy() *DeploymentState { + c := &DeploymentState{} + *c = *d + c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries) + return c +} + +// DeploymentStatusUpdate is used to update the status of a given deployment +type DeploymentStatusUpdate struct { + // DeploymentID is the ID of the deployment to update + DeploymentID string + + // Status is the new status of the deployment. + Status string + + // StatusDescription is the new status description of the deployment. 
+ StatusDescription string +} + +// RescheduleTracker encapsulates previous reschedule events +type RescheduleTracker struct { + Events []*RescheduleEvent +} + +func (rt *RescheduleTracker) Copy() *RescheduleTracker { + if rt == nil { + return nil + } + nt := &RescheduleTracker{} + *nt = *rt + rescheduleEvents := make([]*RescheduleEvent, 0, len(rt.Events)) + for _, tracker := range rt.Events { + rescheduleEvents = append(rescheduleEvents, tracker.Copy()) + } + nt.Events = rescheduleEvents + return nt +} + +// RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation +type RescheduleEvent struct { + // RescheduleTime is the timestamp of a reschedule attempt + RescheduleTime int64 + + // PrevAllocID is the ID of the previous allocation being restarted + PrevAllocID string + + // PrevNodeID is the node ID of the previous allocation + PrevNodeID string + + // Delay is the reschedule delay associated with the attempt + Delay time.Duration +} + +func NewRescheduleEvent(rescheduleTime int64, prevAllocID string, prevNodeID string, delay time.Duration) *RescheduleEvent { + return &RescheduleEvent{RescheduleTime: rescheduleTime, + PrevAllocID: prevAllocID, + PrevNodeID: prevNodeID, + Delay: delay} +} + +func (re *RescheduleEvent) Copy() *RescheduleEvent { + if re == nil { + return nil + } + copy := new(RescheduleEvent) + *copy = *re + return copy +} + +// DesiredTransition is used to mark an allocation as having a desired state +// transition. This information can be used by the scheduler to make the +// correct decision. +type DesiredTransition struct { + // Migrate is used to indicate that this allocation should be stopped and + // migrated to another node. + Migrate *bool + + // Reschedule is used to indicate that this allocation is eligible to be + // rescheduled. Most allocations are automatically eligible for + // rescheduling, so this field is only required when an allocation is not + // automatically eligible. An example is an allocation that is part of a + // deployment. + Reschedule *bool + + // ForceReschedule is used to indicate that this allocation must be rescheduled. + // This field is only used when operators want to force a placement even if + // a failed allocation is not eligible to be rescheduled + ForceReschedule *bool +} + +// Merge merges the two desired transitions, preferring the values from the +// passed in object. +func (d *DesiredTransition) Merge(o *DesiredTransition) { + if o.Migrate != nil { + d.Migrate = o.Migrate + } + + if o.Reschedule != nil { + d.Reschedule = o.Reschedule + } + + if o.ForceReschedule != nil { + d.ForceReschedule = o.ForceReschedule + } +} + +// ShouldMigrate returns whether the transition object dictates a migration. +func (d *DesiredTransition) ShouldMigrate() bool { + return d.Migrate != nil && *d.Migrate +} + +// ShouldReschedule returns whether the transition object dictates a +// rescheduling. +func (d *DesiredTransition) ShouldReschedule() bool { + return d.Reschedule != nil && *d.Reschedule +} + +// ShouldForceReschedule returns whether the transition object dictates a +// forced rescheduling. 
+func (d *DesiredTransition) ShouldForceReschedule() bool { + if d == nil { + return false + } + return d.ForceReschedule != nil && *d.ForceReschedule +} + +const ( + AllocDesiredStatusRun = "run" // Allocation should run + AllocDesiredStatusStop = "stop" // Allocation should stop + AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted +) + +const ( + AllocClientStatusPending = "pending" + AllocClientStatusRunning = "running" + AllocClientStatusComplete = "complete" + AllocClientStatusFailed = "failed" + AllocClientStatusLost = "lost" +) + +// Allocation is used to allocate the placement of a task group to a node. +type Allocation struct { + // ID of the allocation (UUID) + ID string + + // Namespace is the namespace the allocation is created in + Namespace string + + // ID of the evaluation that generated this allocation + EvalID string + + // Name is a logical name of the allocation. + Name string + + // NodeID is the node this is being placed on + NodeID string + + // Job is the parent job of the task group being allocated. + // This is copied at allocation time to avoid issues if the job + // definition is updated. + JobID string + Job *Job + + // TaskGroup is the name of the task group that should be run + TaskGroup string + + // COMPAT(0.11): Remove in 0.11 + // Resources is the total set of resources allocated as part + // of this allocation of the task group. Dynamic ports will be set by + // the scheduler. + Resources *Resources + + // COMPAT(0.11): Remove in 0.11 + // SharedResources are the resources that are shared by all the tasks in an + // allocation + SharedResources *Resources + + // COMPAT(0.11): Remove in 0.11 + // TaskResources is the set of resources allocated to each + // task. These should sum to the total Resources. Dynamic ports will be + // set by the scheduler. + TaskResources map[string]*Resources + + // AllocatedResources is the total resources allocated for the task group. + AllocatedResources *AllocatedResources + + // Metrics associated with this allocation + Metrics *AllocMetric + + // Desired Status of the allocation on the client + DesiredStatus string + + // DesiredStatusDescription is meant to provide more human useful information + DesiredDescription string + + // DesiredTransition is used to indicate that a state transition + // is desired for a given reason. 
+ DesiredTransition DesiredTransition + + // Status of the allocation on the client + ClientStatus string + + // ClientStatusDescription is meant to provide more human useful information + ClientDescription string + + // TaskStates stores the state of each task, + TaskStates map[string]*TaskState + + // PreviousAllocation is the allocation that this allocation is replacing + PreviousAllocation string + + // NextAllocation is the allocation that this allocation is being replaced by + NextAllocation string + + // DeploymentID identifies an allocation as being created from a + // particular deployment + DeploymentID string + + // DeploymentStatus captures the status of the allocation as part of the + // given deployment + DeploymentStatus *AllocDeploymentStatus + + // RescheduleTrackers captures details of previous reschedule attempts of the allocation + RescheduleTracker *RescheduleTracker + + // FollowupEvalID captures a follow up evaluation created to handle a failed allocation + // that can be rescheduled in the future + FollowupEvalID string + + // PreemptedAllocations captures IDs of any allocations that were preempted + // in order to place this allocation + PreemptedAllocations []string + + // PreemptedByAllocation tracks the alloc ID of the allocation that caused this allocation + // to stop running because it got preempted + PreemptedByAllocation string + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 + + // AllocModifyIndex is not updated when the client updates allocations. This + // lets the client pull only the allocs updated by the server. + AllocModifyIndex uint64 + + // CreateTime is the time the allocation has finished scheduling and been + // verified by the plan applier. + CreateTime int64 + + // ModifyTime is the time the allocation was last updated. + ModifyTime int64 +} + +// Index returns the index of the allocation. If the allocation is from a task +// group with count greater than 1, there will be multiple allocations for it. +func (a *Allocation) Index() uint { + l := len(a.Name) + prefix := len(a.JobID) + len(a.TaskGroup) + 2 + if l <= 3 || l <= prefix { + return uint(0) + } + + strNum := a.Name[prefix : len(a.Name)-1] + num, _ := strconv.Atoi(strNum) + return uint(num) +} + +func (a *Allocation) Copy() *Allocation { + return a.copyImpl(true) +} + +// Copy provides a copy of the allocation but doesn't deep copy the job +func (a *Allocation) CopySkipJob() *Allocation { + return a.copyImpl(false) +} + +func (a *Allocation) copyImpl(job bool) *Allocation { + if a == nil { + return nil + } + na := new(Allocation) + *na = *a + + if job { + na.Job = na.Job.Copy() + } + + na.AllocatedResources = na.AllocatedResources.Copy() + na.Resources = na.Resources.Copy() + na.SharedResources = na.SharedResources.Copy() + + if a.TaskResources != nil { + tr := make(map[string]*Resources, len(na.TaskResources)) + for task, resource := range na.TaskResources { + tr[task] = resource.Copy() + } + na.TaskResources = tr + } + + na.Metrics = na.Metrics.Copy() + na.DeploymentStatus = na.DeploymentStatus.Copy() + + if a.TaskStates != nil { + ts := make(map[string]*TaskState, len(na.TaskStates)) + for task, state := range na.TaskStates { + ts[task] = state.Copy() + } + na.TaskStates = ts + } + + na.RescheduleTracker = a.RescheduleTracker.Copy() + na.PreemptedAllocations = helper.CopySliceString(a.PreemptedAllocations) + return na +} + +// TerminalStatus returns if the desired or actual status is terminal and +// will no longer transition. 
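+// For example (illustrative sketch, not upstream code): an allocation whose
+// desired status is still "run" but whose client status is "failed" is
+// terminal, because the client status alone is terminal:
+//
+//	a := &Allocation{DesiredStatus: AllocDesiredStatusRun, ClientStatus: AllocClientStatusFailed}
+//	a.TerminalStatus() // true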
+func (a *Allocation) TerminalStatus() bool { + // First check the desired state and if that isn't terminal, check client + // state. + return a.ServerTerminalStatus() || a.ClientTerminalStatus() +} + +// ServerTerminalStatus returns true if the desired state of the allocation is terminal +func (a *Allocation) ServerTerminalStatus() bool { + switch a.DesiredStatus { + case AllocDesiredStatusStop, AllocDesiredStatusEvict: + return true + default: + return false + } +} + +// ClientTerminalStatus returns if the client status is terminal and will no longer transition +func (a *Allocation) ClientTerminalStatus() bool { + switch a.ClientStatus { + case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost: + return true + default: + return false + } +} + +// ShouldReschedule returns if the allocation is eligible to be rescheduled according +// to its status and ReschedulePolicy given its failure time +func (a *Allocation) ShouldReschedule(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool { + // First check the desired state + switch a.DesiredStatus { + case AllocDesiredStatusStop, AllocDesiredStatusEvict: + return false + default: + } + switch a.ClientStatus { + case AllocClientStatusFailed: + return a.RescheduleEligible(reschedulePolicy, failTime) + default: + return false + } +} + +// RescheduleEligible returns if the allocation is eligible to be rescheduled according +// to its ReschedulePolicy and the current state of its reschedule trackers +func (a *Allocation) RescheduleEligible(reschedulePolicy *ReschedulePolicy, failTime time.Time) bool { + if reschedulePolicy == nil { + return false + } + attempts := reschedulePolicy.Attempts + interval := reschedulePolicy.Interval + enabled := attempts > 0 || reschedulePolicy.Unlimited + if !enabled { + return false + } + if reschedulePolicy.Unlimited { + return true + } + // Early return true if there are no attempts yet and the number of allowed attempts is > 0 + if (a.RescheduleTracker == nil || len(a.RescheduleTracker.Events) == 0) && attempts > 0 { + return true + } + attempted := 0 + for j := len(a.RescheduleTracker.Events) - 1; j >= 0; j-- { + lastAttempt := a.RescheduleTracker.Events[j].RescheduleTime + timeDiff := failTime.UTC().UnixNano() - lastAttempt + if timeDiff < interval.Nanoseconds() { + attempted += 1 + } + } + return attempted < attempts +} + +// LastEventTime is the time of the last task event in the allocation. +// It is used to determine allocation failure time. 
If the FinishedAt field +// is not set, the alloc's modify time is used +func (a *Allocation) LastEventTime() time.Time { + var lastEventTime time.Time + if a.TaskStates != nil { + for _, s := range a.TaskStates { + if lastEventTime.IsZero() || s.FinishedAt.After(lastEventTime) { + lastEventTime = s.FinishedAt + } + } + } + + if lastEventTime.IsZero() { + return time.Unix(0, a.ModifyTime).UTC() + } + return lastEventTime +} + +// ReschedulePolicy returns the reschedule policy based on the task group +func (a *Allocation) ReschedulePolicy() *ReschedulePolicy { + tg := a.Job.LookupTaskGroup(a.TaskGroup) + if tg == nil { + return nil + } + return tg.ReschedulePolicy +} + +// NextRescheduleTime returns a time on or after which the allocation is eligible to be rescheduled, +// and whether the next reschedule time is within policy's interval if the policy doesn't allow unlimited reschedules +func (a *Allocation) NextRescheduleTime() (time.Time, bool) { + failTime := a.LastEventTime() + reschedulePolicy := a.ReschedulePolicy() + if a.DesiredStatus == AllocDesiredStatusStop || a.ClientStatus != AllocClientStatusFailed || failTime.IsZero() || reschedulePolicy == nil { + return time.Time{}, false + } + + nextDelay := a.NextDelay() + nextRescheduleTime := failTime.Add(nextDelay) + rescheduleEligible := reschedulePolicy.Unlimited || (reschedulePolicy.Attempts > 0 && a.RescheduleTracker == nil) + if reschedulePolicy.Attempts > 0 && a.RescheduleTracker != nil && a.RescheduleTracker.Events != nil { + // Check for eligibility based on the interval if max attempts is set + attempted := 0 + for j := len(a.RescheduleTracker.Events) - 1; j >= 0; j-- { + lastAttempt := a.RescheduleTracker.Events[j].RescheduleTime + timeDiff := failTime.UTC().UnixNano() - lastAttempt + if timeDiff < reschedulePolicy.Interval.Nanoseconds() { + attempted += 1 + } + } + rescheduleEligible = attempted < reschedulePolicy.Attempts && nextDelay < reschedulePolicy.Interval + } + return nextRescheduleTime, rescheduleEligible +} + +// NextDelay returns a duration after which the allocation can be rescheduled. +// It is calculated according to the delay function and previous reschedule attempts. 
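+// As a worked example (illustrative, assuming a policy with Delay = 30s and no
+// MaxDelay cap): the "exponential" delay function yields 30s, 60s, 120s, ...
+// (doubling the delay recorded in the most recent RescheduleEvent), while
+// "fibonacci" yields 30s, 30s, 60s, 90s, 150s, ... (summing the two most
+// recent delays); "constant" always returns the policy's Delay.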
+func (a *Allocation) NextDelay() time.Duration {
+	policy := a.ReschedulePolicy()
+	// Can be nil if the task group was updated to remove its reschedule policy
+	if policy == nil {
+		return 0
+	}
+	delayDur := policy.Delay
+	if a.RescheduleTracker == nil || a.RescheduleTracker.Events == nil || len(a.RescheduleTracker.Events) == 0 {
+		return delayDur
+	}
+	events := a.RescheduleTracker.Events
+	switch policy.DelayFunction {
+	case "exponential":
+		delayDur = a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1].Delay * 2
+	case "fibonacci":
+		if len(events) >= 2 {
+			fibN1Delay := events[len(events)-1].Delay
+			fibN2Delay := events[len(events)-2].Delay
+			// Handle reset of delay ceiling which should cause
+			// a new series to start
+			if fibN2Delay == policy.MaxDelay && fibN1Delay == policy.Delay {
+				delayDur = fibN1Delay
+			} else {
+				delayDur = fibN1Delay + fibN2Delay
+			}
+		}
+	default:
+		return delayDur
+	}
+	if policy.MaxDelay > 0 && delayDur > policy.MaxDelay {
+		delayDur = policy.MaxDelay
+		// check if delay needs to be reset
+
+		lastRescheduleEvent := a.RescheduleTracker.Events[len(a.RescheduleTracker.Events)-1]
+		timeDiff := a.LastEventTime().UTC().UnixNano() - lastRescheduleEvent.RescheduleTime
+		if timeDiff > delayDur.Nanoseconds() {
+			delayDur = policy.Delay
+		}
+
+	}
+
+	return delayDur
+}
+
+// Terminated returns if the allocation is in a terminal state on a client.
+func (a *Allocation) Terminated() bool {
+	if a.ClientStatus == AllocClientStatusFailed ||
+		a.ClientStatus == AllocClientStatusComplete ||
+		a.ClientStatus == AllocClientStatusLost {
+		return true
+	}
+	return false
+}
+
+// RanSuccessfully returns whether the client has run the allocation and all
+// tasks finished successfully. Critically this function returns whether the
+// allocation has run to completion and not just that the alloc has converged to
+// its desired state. That is to say that a batch allocation must have finished
+// with exit code 0 on all task groups. This doesn't really have meaning on a
+// non-batch allocation because a service and system allocation should not
+// finish.
+func (a *Allocation) RanSuccessfully() bool {
+	// Handle the case where the client hasn't started the allocation.
+	if len(a.TaskStates) == 0 {
+		return false
+	}
+
+	// Check to see if all the tasks finished successfully in the allocation
+	allSuccess := true
+	for _, state := range a.TaskStates {
+		allSuccess = allSuccess && state.Successful()
+	}
+
+	return allSuccess
+}
+
+// ShouldMigrate returns if the allocation needs data migration
+func (a *Allocation) ShouldMigrate() bool {
+	if a.PreviousAllocation == "" {
+		return false
+	}
+
+	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
+		return false
+	}
+
+	tg := a.Job.LookupTaskGroup(a.TaskGroup)
+
+	// if the task group is nil or the ephemeral disk block isn't present then
+	// we won't migrate
+	if tg == nil || tg.EphemeralDisk == nil {
+		return false
+	}
+
+	// We won't migrate any data if the user hasn't enabled migration or the
+	// disk is not marked as sticky
+	if !tg.EphemeralDisk.Migrate || !tg.EphemeralDisk.Sticky {
+		return false
+	}
+
+	return true
+}
+
+// SetEventDisplayMessages populates the display messages if they are not already set,
+// a temporary fix to handle old allocations that don't have them.
+// This method will be removed in a future release.
+func (a *Allocation) SetEventDisplayMessages() {
+	setDisplayMsg(a.TaskStates)
+}
+
+// COMPAT(0.11): Remove in 0.11
+// ComparableResources returns the resources on the allocation
+// handling upgrade paths. After 0.11 calls to this should be replaced with:
+// alloc.AllocatedResources.Comparable()
+func (a *Allocation) ComparableResources() *ComparableResources {
+	// Alloc already has 0.9+ behavior
+	if a.AllocatedResources != nil {
+		return a.AllocatedResources.Comparable()
+	}
+
+	var resources *Resources
+	if a.Resources != nil {
+		resources = a.Resources
+	} else if a.TaskResources != nil {
+		resources = new(Resources)
+		resources.Add(a.SharedResources)
+		for _, taskResource := range a.TaskResources {
+			resources.Add(taskResource)
+		}
+	}
+
+	// Upgrade path
+	return &ComparableResources{
+		Flattened: AllocatedTaskResources{
+			Cpu: AllocatedCpuResources{
+				CpuShares: int64(resources.CPU),
+			},
+			Memory: AllocatedMemoryResources{
+				MemoryMB: int64(resources.MemoryMB),
+			},
+			Networks: resources.Networks,
+		},
+		Shared: AllocatedSharedResources{
+			DiskMB: int64(resources.DiskMB),
+		},
+	}
+}
+
+// LookupTask by name from the Allocation. Returns nil if the Job is not set, the
+// TaskGroup does not exist, or the task name cannot be found.
+func (a *Allocation) LookupTask(name string) *Task {
+	if a.Job == nil {
+		return nil
+	}
+
+	tg := a.Job.LookupTaskGroup(a.TaskGroup)
+	if tg == nil {
+		return nil
+	}
+
+	return tg.LookupTask(name)
+}
+
+// Stub returns a list stub for the allocation
+func (a *Allocation) Stub() *AllocListStub {
+	return &AllocListStub{
+		ID: a.ID,
+		EvalID: a.EvalID,
+		Name: a.Name,
+		Namespace: a.Namespace,
+		NodeID: a.NodeID,
+		JobID: a.JobID,
+		JobType: a.Job.Type,
+		JobVersion: a.Job.Version,
+		TaskGroup: a.TaskGroup,
+		DesiredStatus: a.DesiredStatus,
+		DesiredDescription: a.DesiredDescription,
+		ClientStatus: a.ClientStatus,
+		ClientDescription: a.ClientDescription,
+		DesiredTransition: a.DesiredTransition,
+		TaskStates: a.TaskStates,
+		DeploymentStatus: a.DeploymentStatus,
+		FollowupEvalID: a.FollowupEvalID,
+		RescheduleTracker: a.RescheduleTracker,
+		CreateIndex: a.CreateIndex,
+		ModifyIndex: a.ModifyIndex,
+		CreateTime: a.CreateTime,
+		ModifyTime: a.ModifyTime,
+	}
+}
+
+// AllocListStub is used to return a subset of alloc information
+type AllocListStub struct {
+	ID string
+	EvalID string
+	Name string
+	Namespace string
+	NodeID string
+	JobID string
+	JobType string
+	JobVersion uint64
+	TaskGroup string
+	DesiredStatus string
+	DesiredDescription string
+	ClientStatus string
+	ClientDescription string
+	DesiredTransition DesiredTransition
+	TaskStates map[string]*TaskState
+	DeploymentStatus *AllocDeploymentStatus
+	FollowupEvalID string
+	RescheduleTracker *RescheduleTracker
+	CreateIndex uint64
+	ModifyIndex uint64
+	CreateTime int64
+	ModifyTime int64
+}
+
+// SetEventDisplayMessages populates the display messages if they are not already set,
+// a temporary fix to handle old allocations that don't have them.
+// This method will be removed in a future release.
+func (a *AllocListStub) SetEventDisplayMessages() {
+	setDisplayMsg(a.TaskStates)
+}
+
+func setDisplayMsg(taskStates map[string]*TaskState) {
+	if taskStates != nil {
+		for _, taskState := range taskStates {
+			for _, event := range taskState.Events {
+				event.PopulateEventDisplayMessage()
+			}
+		}
+	}
+}
+
+// AllocMetric is used to track various metrics while attempting
+// to make an allocation. These are used to debug a job, or to better
+// understand the pressure within the system.
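+// Illustrative flow during a single scheduling attempt (a sketch; the node
+// variables are hypothetical, not part of the upstream source):
+//
+//	m := &AllocMetric{}
+//	m.EvaluateNode()                         // a node was considered
+//	m.FilterNode(nodeA, "missing driver")    // nodeA failed a constraint
+//	m.ExhaustedNode(nodeB, "memory")         // nodeB lacked memory
+//	m.ScoreNode(nodeC, NormScorerName, 0.42) // record nodeC's normalized score
+//	m.PopulateScoreMetaData()                // expose the top scores via ScoreMetaData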
+type AllocMetric struct { + // NodesEvaluated is the number of nodes that were evaluated + NodesEvaluated int + + // NodesFiltered is the number of nodes filtered due to a constraint + NodesFiltered int + + // NodesAvailable is the number of nodes available for evaluation per DC. + NodesAvailable map[string]int + + // ClassFiltered is the number of nodes filtered by class + ClassFiltered map[string]int + + // ConstraintFiltered is the number of failures caused by constraint + ConstraintFiltered map[string]int + + // NodesExhausted is the number of nodes skipped due to being + // exhausted of at least one resource + NodesExhausted int + + // ClassExhausted is the number of nodes exhausted by class + ClassExhausted map[string]int + + // DimensionExhausted provides the count by dimension or reason + DimensionExhausted map[string]int + + // QuotaExhausted provides the exhausted dimensions + QuotaExhausted []string + + // Scores is the scores of the final few nodes remaining + // for placement. The top score is typically selected. + // Deprecated: Replaced by ScoreMetaData in Nomad 0.9 + Scores map[string]float64 + + // ScoreMetaData is a slice of top scoring nodes displayed in the CLI + ScoreMetaData []*NodeScoreMeta + + // nodeScoreMeta is used to keep scores for a single node id. It is cleared out after + // we receive normalized score during the last step of the scoring stack. + nodeScoreMeta *NodeScoreMeta + + // topScores is used to maintain a heap of the top K nodes with + // the highest normalized score + topScores *kheap.ScoreHeap + + // AllocationTime is a measure of how long the allocation + // attempt took. This can affect performance and SLAs. + AllocationTime time.Duration + + // CoalescedFailures indicates the number of other + // allocations that were coalesced into this failed allocation. + // This is to prevent creating many failed allocations for a + // single task group. 
+ CoalescedFailures int +} + +func (a *AllocMetric) Copy() *AllocMetric { + if a == nil { + return nil + } + na := new(AllocMetric) + *na = *a + na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable) + na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered) + na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered) + na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted) + na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted) + na.QuotaExhausted = helper.CopySliceString(na.QuotaExhausted) + na.Scores = helper.CopyMapStringFloat64(na.Scores) + na.ScoreMetaData = CopySliceNodeScoreMeta(na.ScoreMetaData) + return na +} + +func (a *AllocMetric) EvaluateNode() { + a.NodesEvaluated += 1 +} + +func (a *AllocMetric) FilterNode(node *Node, constraint string) { + a.NodesFiltered += 1 + if node != nil && node.NodeClass != "" { + if a.ClassFiltered == nil { + a.ClassFiltered = make(map[string]int) + } + a.ClassFiltered[node.NodeClass] += 1 + } + if constraint != "" { + if a.ConstraintFiltered == nil { + a.ConstraintFiltered = make(map[string]int) + } + a.ConstraintFiltered[constraint] += 1 + } +} + +func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) { + a.NodesExhausted += 1 + if node != nil && node.NodeClass != "" { + if a.ClassExhausted == nil { + a.ClassExhausted = make(map[string]int) + } + a.ClassExhausted[node.NodeClass] += 1 + } + if dimension != "" { + if a.DimensionExhausted == nil { + a.DimensionExhausted = make(map[string]int) + } + a.DimensionExhausted[dimension] += 1 + } +} + +func (a *AllocMetric) ExhaustQuota(dimensions []string) { + if a.QuotaExhausted == nil { + a.QuotaExhausted = make([]string, 0, len(dimensions)) + } + + a.QuotaExhausted = append(a.QuotaExhausted, dimensions...) +} + +// ScoreNode is used to gather top K scoring nodes in a heap +func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) { + // Create nodeScoreMeta lazily if its the first time or if its a new node + if a.nodeScoreMeta == nil || a.nodeScoreMeta.NodeID != node.ID { + a.nodeScoreMeta = &NodeScoreMeta{ + NodeID: node.ID, + Scores: make(map[string]float64), + } + } + if name == NormScorerName { + a.nodeScoreMeta.NormScore = score + // Once we have the normalized score we can push to the heap + // that tracks top K by normalized score + + // Create the heap if its not there already + if a.topScores == nil { + a.topScores = kheap.NewScoreHeap(MaxRetainedNodeScores) + } + heap.Push(a.topScores, a.nodeScoreMeta) + + // Clear out this entry because its now in the heap + a.nodeScoreMeta = nil + } else { + a.nodeScoreMeta.Scores[name] = score + } +} + +// PopulateScoreMetaData populates a map of scorer to scoring metadata +// The map is populated by popping elements from a heap of top K scores +// maintained per scorer +func (a *AllocMetric) PopulateScoreMetaData() { + if a.topScores == nil { + return + } + + if a.ScoreMetaData == nil { + a.ScoreMetaData = make([]*NodeScoreMeta, a.topScores.Len()) + } + heapItems := a.topScores.GetItemsReverse() + for i, item := range heapItems { + a.ScoreMetaData[i] = item.(*NodeScoreMeta) + } +} + +// NodeScoreMeta captures scoring meta data derived from +// different scoring factors. 
+type NodeScoreMeta struct { + NodeID string + Scores map[string]float64 + NormScore float64 +} + +func (s *NodeScoreMeta) Copy() *NodeScoreMeta { + if s == nil { + return nil + } + ns := new(NodeScoreMeta) + *ns = *s + return ns +} + +func (s *NodeScoreMeta) String() string { + return fmt.Sprintf("%s %f %v", s.NodeID, s.NormScore, s.Scores) +} + +func (s *NodeScoreMeta) Score() float64 { + return s.NormScore +} + +func (s *NodeScoreMeta) Data() interface{} { + return s +} + +// AllocDeploymentStatus captures the status of the allocation as part of the +// deployment. This can include things like if the allocation has been marked as +// healthy. +type AllocDeploymentStatus struct { + // Healthy marks whether the allocation has been marked healthy or unhealthy + // as part of a deployment. It can be unset if it has neither been marked + // healthy or unhealthy. + Healthy *bool + + // Timestamp is the time at which the health status was set. + Timestamp time.Time + + // Canary marks whether the allocation is a canary or not. A canary that has + // been promoted will have this field set to false. + Canary bool + + // ModifyIndex is the raft index in which the deployment status was last + // changed. + ModifyIndex uint64 +} + +// HasHealth returns true if the allocation has its health set. +func (a *AllocDeploymentStatus) HasHealth() bool { + return a != nil && a.Healthy != nil +} + +// IsHealthy returns if the allocation is marked as healthy as part of a +// deployment +func (a *AllocDeploymentStatus) IsHealthy() bool { + if a == nil { + return false + } + + return a.Healthy != nil && *a.Healthy +} + +// IsUnhealthy returns if the allocation is marked as unhealthy as part of a +// deployment +func (a *AllocDeploymentStatus) IsUnhealthy() bool { + if a == nil { + return false + } + + return a.Healthy != nil && !*a.Healthy +} + +// IsCanary returns if the allocation is marked as a canary +func (a *AllocDeploymentStatus) IsCanary() bool { + if a == nil { + return false + } + + return a.Canary +} + +func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus { + if a == nil { + return nil + } + + c := new(AllocDeploymentStatus) + *c = *a + + if a.Healthy != nil { + c.Healthy = helper.BoolToPtr(*a.Healthy) + } + + return c +} + +const ( + EvalStatusBlocked = "blocked" + EvalStatusPending = "pending" + EvalStatusComplete = "complete" + EvalStatusFailed = "failed" + EvalStatusCancelled = "canceled" +) + +const ( + EvalTriggerJobRegister = "job-register" + EvalTriggerJobDeregister = "job-deregister" + EvalTriggerPeriodicJob = "periodic-job" + EvalTriggerNodeDrain = "node-drain" + EvalTriggerNodeUpdate = "node-update" + EvalTriggerScheduled = "scheduled" + EvalTriggerRollingUpdate = "rolling-update" + EvalTriggerDeploymentWatcher = "deployment-watcher" + EvalTriggerFailedFollowUp = "failed-follow-up" + EvalTriggerMaxPlans = "max-plan-attempts" + EvalTriggerRetryFailedAlloc = "alloc-failure" + EvalTriggerQueuedAllocs = "queued-allocs" + EvalTriggerPreemption = "preemption" +) + +const ( + // CoreJobEvalGC is used for the garbage collection of evaluations + // and allocations. We periodically scan evaluations in a terminal state, + // in which all the corresponding allocations are also terminal. We + // delete these out of the system to bound the state. + CoreJobEvalGC = "eval-gc" + + // CoreJobNodeGC is used for the garbage collection of failed nodes. + // We periodically scan nodes in a terminal state, and if they have no + // corresponding allocations we delete these out of the system. 
+ CoreJobNodeGC = "node-gc" + + // CoreJobJobGC is used for the garbage collection of eligible jobs. We + // periodically scan garbage collectible jobs and check if both their + // evaluations and allocations are terminal. If so, we delete these out of + // the system. + CoreJobJobGC = "job-gc" + + // CoreJobDeploymentGC is used for the garbage collection of eligible + // deployments. We periodically scan garbage collectible deployments and + // check if they are terminal. If so, we delete these out of the system. + CoreJobDeploymentGC = "deployment-gc" + + // CoreJobForceGC is used to force garbage collection of all GCable objects. + CoreJobForceGC = "force-gc" +) + +// Evaluation is used anytime we need to apply business logic as a result +// of a change to our desired state (job specification) or the emergent state +// (registered nodes). When the inputs change, we need to "evaluate" them, +// potentially taking action (allocation of work) or doing nothing if the state +// of the world does not require it. +type Evaluation struct { + // ID is a randomly generated UUID used for this evaluation. This + // is assigned upon the creation of the evaluation. + ID string + + // Namespace is the namespace the evaluation is created in + Namespace string + + // Priority is used to control scheduling importance and if this job + // can preempt other jobs. + Priority int + + // Type is used to control which schedulers are available to handle + // this evaluation. + Type string + + // TriggeredBy is used to give some insight into why this Eval + // was created. (Job change, node failure, alloc failure, etc). + TriggeredBy string + + // JobID is the job this evaluation is scoped to. Evaluations cannot + // be run in parallel for a given JobID, so we serialize on this. + JobID string + + // JobModifyIndex is the modify index of the job at the time + // the evaluation was created + JobModifyIndex uint64 + + // NodeID is the node that was affected triggering the evaluation. + NodeID string + + // NodeModifyIndex is the modify index of the node at the time + // the evaluation was created + NodeModifyIndex uint64 + + // DeploymentID is the ID of the deployment that triggered the evaluation. + DeploymentID string + + // Status of the evaluation + Status string + + // StatusDescription is meant to provide more human useful information + StatusDescription string + + // Wait is a minimum wait time for running the eval. This is used to + // support a rolling upgrade in versions prior to 0.7.0 + // Deprecated + Wait time.Duration + + // WaitUntil is the time when this eval should be run. This is used to + // supported delayed rescheduling of failed allocations + WaitUntil time.Time + + // NextEval is the evaluation ID for the eval created to do a followup. + // This is used to support rolling upgrades and failed-follow-up evals, where + // we need a chain of evaluations. + NextEval string + + // PreviousEval is the evaluation ID for the eval creating this one to do a followup. + // This is used to support rolling upgrades and failed-follow-up evals, where + // we need a chain of evaluations. + PreviousEval string + + // BlockedEval is the evaluation ID for a created blocked eval. A + // blocked eval will be created if all allocations could not be placed due + // to constraints or lacking resources. + BlockedEval string + + // FailedTGAllocs are task groups which have allocations that could not be + // made, but the metrics are persisted so that the user can use the feedback + // to determine the cause. 
+	FailedTGAllocs map[string]*AllocMetric
+
+	// ClassEligibility tracks computed node classes that have been explicitly
+	// marked as eligible or ineligible.
+	ClassEligibility map[string]bool
+
+	// QuotaLimitReached marks whether a quota limit was reached for the
+	// evaluation.
+	QuotaLimitReached string
+
+	// EscapedComputedClass marks whether the job has constraints that are not
+	// captured by computed node classes.
+	EscapedComputedClass bool
+
+	// AnnotatePlan triggers the scheduler to provide additional annotations
+	// during the evaluation. This should not be set during normal operations.
+	AnnotatePlan bool
+
+	// QueuedAllocations is the number of unplaced allocations at the time the
+	// evaluation was processed. The map is keyed by Task Group names.
+	QueuedAllocations map[string]int
+
+	// LeaderACL provides the ACL token to use when issuing RPCs back to the
+	// leader. This will be a valid management token as long as the leader is
+	// active. This should not ever be exposed via the API.
+	LeaderACL string
+
+	// SnapshotIndex is the Raft index of the snapshot used to process the
+	// evaluation. The index will either be set when it has gone through the
+	// scheduler or if a blocked evaluation is being created. The index is set
+	// in this case so we can determine if an early unblocking is required since
+	// capacity has changed since the evaluation was created. This can result in
+	// the SnapshotIndex being less than the CreateIndex.
+	SnapshotIndex uint64
+
+	// Raft Indexes
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// TerminalStatus returns if the current status is terminal and
+// will no longer transition.
+func (e *Evaluation) TerminalStatus() bool {
+	switch e.Status {
+	case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled:
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *Evaluation) GoString() string {
+	return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
+}
+
+func (e *Evaluation) Copy() *Evaluation {
+	if e == nil {
+		return nil
+	}
+	ne := new(Evaluation)
+	*ne = *e
+
+	// Copy ClassEligibility
+	if e.ClassEligibility != nil {
+		classes := make(map[string]bool, len(e.ClassEligibility))
+		for class, elig := range e.ClassEligibility {
+			classes[class] = elig
+		}
+		ne.ClassEligibility = classes
+	}
+
+	// Copy FailedTGAllocs
+	if e.FailedTGAllocs != nil {
+		failedTGs := make(map[string]*AllocMetric, len(e.FailedTGAllocs))
+		for tg, metric := range e.FailedTGAllocs {
+			failedTGs[tg] = metric.Copy()
+		}
+		ne.FailedTGAllocs = failedTGs
+	}
+
+	// Copy queued allocations
+	if e.QueuedAllocations != nil {
+		queuedAllocations := make(map[string]int, len(e.QueuedAllocations))
+		for tg, num := range e.QueuedAllocations {
+			queuedAllocations[tg] = num
+		}
+		ne.QueuedAllocations = queuedAllocations
+	}
+
+	return ne
+}
+
+// ShouldEnqueue checks if a given evaluation should be enqueued into the
+// eval_broker
+func (e *Evaluation) ShouldEnqueue() bool {
+	switch e.Status {
+	case EvalStatusPending:
+		return true
+	case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled:
+		return false
+	default:
+		panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status))
+	}
+}
+
+// ShouldBlock checks if a given evaluation should be entered into the blocked
+// eval tracker.
+func (e *Evaluation) ShouldBlock() bool { + switch e.Status { + case EvalStatusBlocked: + return true + case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled: + return false + default: + panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status)) + } +} + +// MakePlan is used to make a plan from the given evaluation +// for a given Job +func (e *Evaluation) MakePlan(j *Job) *Plan { + p := &Plan{ + EvalID: e.ID, + Priority: e.Priority, + Job: j, + NodeUpdate: make(map[string][]*Allocation), + NodeAllocation: make(map[string][]*Allocation), + NodePreemptions: make(map[string][]*Allocation), + } + if j != nil { + p.AllAtOnce = j.AllAtOnce + } + return p +} + +// NextRollingEval creates an evaluation to followup this eval for rolling updates +func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation { + return &Evaluation{ + ID: uuid.Generate(), + Namespace: e.Namespace, + Priority: e.Priority, + Type: e.Type, + TriggeredBy: EvalTriggerRollingUpdate, + JobID: e.JobID, + JobModifyIndex: e.JobModifyIndex, + Status: EvalStatusPending, + Wait: wait, + PreviousEval: e.ID, + } +} + +// CreateBlockedEval creates a blocked evaluation to followup this eval to place any +// failed allocations. It takes the classes marked explicitly eligible or +// ineligible, whether the job has escaped computed node classes and whether the +// quota limit was reached. +func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, + escaped bool, quotaReached string) *Evaluation { + + return &Evaluation{ + ID: uuid.Generate(), + Namespace: e.Namespace, + Priority: e.Priority, + Type: e.Type, + TriggeredBy: EvalTriggerQueuedAllocs, + JobID: e.JobID, + JobModifyIndex: e.JobModifyIndex, + Status: EvalStatusBlocked, + PreviousEval: e.ID, + ClassEligibility: classEligibility, + EscapedComputedClass: escaped, + QuotaLimitReached: quotaReached, + } +} + +// CreateFailedFollowUpEval creates a follow up evaluation when the current one +// has been marked as failed because it has hit the delivery limit and will not +// be retried by the eval_broker. Callers should copy the created eval's ID to +// into the old eval's NextEval field. +func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation { + return &Evaluation{ + ID: uuid.Generate(), + Namespace: e.Namespace, + Priority: e.Priority, + Type: e.Type, + TriggeredBy: EvalTriggerFailedFollowUp, + JobID: e.JobID, + JobModifyIndex: e.JobModifyIndex, + Status: EvalStatusPending, + Wait: wait, + PreviousEval: e.ID, + } +} + +// Plan is used to submit a commit plan for task allocations. These +// are submitted to the leader which verifies that resources have +// not been overcommitted before admitting the plan. +type Plan struct { + // EvalID is the evaluation ID this plan is associated with + EvalID string + + // EvalToken is used to prevent a split-brain processing of + // an evaluation. There should only be a single scheduler running + // an Eval at a time, but this could be violated after a leadership + // transition. This unique token is used to reject plans that are + // being submitted from a different leader. + EvalToken string + + // Priority is the priority of the upstream job + Priority int + + // AllAtOnce is used to control if incremental scheduling of task groups + // is allowed or if we must do a gang scheduling of the entire job. + // If this is false, a plan may be partially applied. Otherwise, the + // entire plan must be able to make progress. 
+ AllAtOnce bool + + // Job is the parent job of all the allocations in the Plan. + // Since a Plan only involves a single Job, we can reduce the size + // of the plan by only including it once. + Job *Job + + // NodeUpdate contains all the allocations for each node. For each node, + // this is a list of the allocations to update to either stop or evict. + NodeUpdate map[string][]*Allocation + + // NodeAllocation contains all the allocations for each node. + // The evicts must be considered prior to the allocations. + NodeAllocation map[string][]*Allocation + + // Annotations contains annotations by the scheduler to be used by operators + // to understand the decisions made by the scheduler. + Annotations *PlanAnnotations + + // Deployment is the deployment created or updated by the scheduler that + // should be applied by the planner. + Deployment *Deployment + + // DeploymentUpdates is a set of status updates to apply to the given + // deployments. This allows the scheduler to cancel any unneeded deployment + // because the job is stopped or the update block is removed. + DeploymentUpdates []*DeploymentStatusUpdate + + // NodePreemptions is a map from node id to a set of allocations from other + // lower priority jobs that are preempted. Preempted allocations are marked + // as evicted. + NodePreemptions map[string][]*Allocation +} + +// AppendUpdate marks the allocation for eviction. The clientStatus of the +// allocation may be optionally set by passing in a non-empty value. +func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) { + newAlloc := new(Allocation) + *newAlloc = *alloc + + // If the job is not set in the plan we are deregistering a job so we + // extract the job from the allocation. + if p.Job == nil && newAlloc.Job != nil { + p.Job = newAlloc.Job + } + + // Normalize the job + newAlloc.Job = nil + + // Strip the resources as it can be rebuilt. + newAlloc.Resources = nil + + newAlloc.DesiredStatus = desiredStatus + newAlloc.DesiredDescription = desiredDesc + + if clientStatus != "" { + newAlloc.ClientStatus = clientStatus + } + + node := alloc.NodeID + existing := p.NodeUpdate[node] + p.NodeUpdate[node] = append(existing, newAlloc) +} + +// AppendPreemptedAlloc is used to append an allocation that's being preempted to the plan. 
+// To minimize the size of the plan, this only sets a minimal set of fields in the allocation +func (p *Plan) AppendPreemptedAlloc(alloc *Allocation, desiredStatus, preemptingAllocID string) { + newAlloc := &Allocation{} + newAlloc.ID = alloc.ID + newAlloc.JobID = alloc.JobID + newAlloc.Namespace = alloc.Namespace + newAlloc.DesiredStatus = desiredStatus + newAlloc.PreemptedByAllocation = preemptingAllocID + + desiredDesc := fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID) + newAlloc.DesiredDescription = desiredDesc + + // TaskResources are needed by the plan applier to check if allocations fit + // after removing preempted allocations + if alloc.AllocatedResources != nil { + newAlloc.AllocatedResources = alloc.AllocatedResources + } else { + // COMPAT Remove in version 0.11 + newAlloc.TaskResources = alloc.TaskResources + newAlloc.SharedResources = alloc.SharedResources + } + + // Append this alloc to slice for this node + node := alloc.NodeID + existing := p.NodePreemptions[node] + p.NodePreemptions[node] = append(existing, newAlloc) +} + +func (p *Plan) PopUpdate(alloc *Allocation) { + existing := p.NodeUpdate[alloc.NodeID] + n := len(existing) + if n > 0 && existing[n-1].ID == alloc.ID { + existing = existing[:n-1] + if len(existing) > 0 { + p.NodeUpdate[alloc.NodeID] = existing + } else { + delete(p.NodeUpdate, alloc.NodeID) + } + } +} + +func (p *Plan) AppendAlloc(alloc *Allocation) { + node := alloc.NodeID + existing := p.NodeAllocation[node] + + // Normalize the job + alloc.Job = nil + + p.NodeAllocation[node] = append(existing, alloc) +} + +// IsNoOp checks if this plan would do nothing +func (p *Plan) IsNoOp() bool { + return len(p.NodeUpdate) == 0 && + len(p.NodeAllocation) == 0 && + p.Deployment == nil && + len(p.DeploymentUpdates) == 0 +} + +// PlanResult is the result of a plan submitted to the leader. +type PlanResult struct { + // NodeUpdate contains all the updates that were committed. + NodeUpdate map[string][]*Allocation + + // NodeAllocation contains all the allocations that were committed. + NodeAllocation map[string][]*Allocation + + // Deployment is the deployment that was committed. + Deployment *Deployment + + // DeploymentUpdates is the set of deployment updates that were committed. + DeploymentUpdates []*DeploymentStatusUpdate + + // NodePreemptions is a map from node id to a set of allocations from other + // lower priority jobs that are preempted. Preempted allocations are marked + // as stopped. + NodePreemptions map[string][]*Allocation + + // RefreshIndex is the index the worker should refresh state up to. + // This allows all evictions and allocations to be materialized. + // If any allocations were rejected due to stale data (node state, + // over committed) this can be used to force a worker refresh. + RefreshIndex uint64 + + // AllocIndex is the Raft index in which the evictions and + // allocations took place. This is used for the write index. + AllocIndex uint64 +} + +// IsNoOp checks if this plan result would do nothing +func (p *PlanResult) IsNoOp() bool { + return len(p.NodeUpdate) == 0 && len(p.NodeAllocation) == 0 && + len(p.DeploymentUpdates) == 0 && p.Deployment == nil +} + +// FullCommit is used to check if all the allocations in a plan +// were committed as part of the result. Returns if there was +// a match, and the number of expected and actual allocations. 
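+// Illustrative caller-side check (a sketch; the variable names are assumed,
+// not upstream code):
+//
+//	if ok, expected, actual := result.FullCommit(plan); !ok {
+//		// only `actual` of `expected` allocations were committed; refresh
+//		// state up to result.RefreshIndex before retrying the remainder
+//	}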
+func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) { + expected := 0 + actual := 0 + for name, allocList := range plan.NodeAllocation { + didAlloc, _ := p.NodeAllocation[name] + expected += len(allocList) + actual += len(didAlloc) + } + return actual == expected, expected, actual +} + +// PlanAnnotations holds annotations made by the scheduler to give further debug +// information to operators. +type PlanAnnotations struct { + // DesiredTGUpdates is the set of desired updates per task group. + DesiredTGUpdates map[string]*DesiredUpdates + + // PreemptedAllocs is the set of allocations to be preempted to make the placement successful. + PreemptedAllocs []*AllocListStub +} + +// DesiredUpdates is the set of changes the scheduler would like to make given +// sufficient resources and cluster capacity. +type DesiredUpdates struct { + Ignore uint64 + Place uint64 + Migrate uint64 + Stop uint64 + InPlaceUpdate uint64 + DestructiveUpdate uint64 + Canary uint64 + Preemptions uint64 +} + +func (d *DesiredUpdates) GoString() string { + return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)", + d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary) +} + +// msgpackHandle is a shared handle for encoding/decoding of structs +var MsgpackHandle = func() *codec.MsgpackHandle { + h := &codec.MsgpackHandle{RawToString: true} + + // Sets the default type for decoding a map into a nil interface{}. + // This is necessary in particular because we store the driver configs as a + // nil interface{}. + h.MapType = reflect.TypeOf(map[string]interface{}(nil)) + return h +}() + +var ( + // JsonHandle and JsonHandlePretty are the codec handles to JSON encode + // structs. The pretty handle will add indents for easier human consumption. + JsonHandle = &codec.JsonHandle{ + HTMLCharsAsIs: true, + } + JsonHandlePretty = &codec.JsonHandle{ + HTMLCharsAsIs: true, + Indent: 4, + } +) + +// TODO Figure out if we can remove this. This is our fork that is just way +// behind. I feel like its original purpose was to pin at a stable version but +// now we can accomplish this with vendoring. +var HashiMsgpackHandle = func() *hcodec.MsgpackHandle { + h := &hcodec.MsgpackHandle{RawToString: true} + + // Sets the default type for decoding a map into a nil interface{}. + // This is necessary in particular because we store the driver configs as a + // nil interface{}. + h.MapType = reflect.TypeOf(map[string]interface{}(nil)) + return h +}() + +// Decode is used to decode a MsgPack encoded object +func Decode(buf []byte, out interface{}) error { + return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out) +} + +// Encode is used to encode a MsgPack object with type prefix +func Encode(t MessageType, msg interface{}) ([]byte, error) { + var buf bytes.Buffer + buf.WriteByte(uint8(t)) + err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg) + return buf.Bytes(), err +} + +// KeyringResponse is a unified key response and can be used for install, +// remove, use, as well as listing key queries. +type KeyringResponse struct { + Messages map[string]string + Keys map[string]int + NumNodes int +} + +// KeyringRequest is request objects for serf key operations. +type KeyringRequest struct { + Key string +} + +// RecoverableError wraps an error and marks whether it is recoverable and could +// be retried or it is fatal. 
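+// Illustrative usage (a sketch, not part of the upstream source):
+//
+//	err := NewRecoverableError(fmt.Errorf("connection reset by peer"), true)
+//	if IsRecoverable(err) {
+//		// the caller may safely retry the operation
+//	}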
+type RecoverableError struct { + Err string + Recoverable bool +} + +// NewRecoverableError is used to wrap an error and mark it as recoverable or +// not. +func NewRecoverableError(e error, recoverable bool) error { + if e == nil { + return nil + } + + return &RecoverableError{ + Err: e.Error(), + Recoverable: recoverable, + } +} + +// WrapRecoverable wraps an existing error in a new RecoverableError with a new +// message. If the error was recoverable before the returned error is as well; +// otherwise it is unrecoverable. +func WrapRecoverable(msg string, err error) error { + return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)} +} + +func (r *RecoverableError) Error() string { + return r.Err +} + +func (r *RecoverableError) IsRecoverable() bool { + return r.Recoverable +} + +func (r *RecoverableError) IsUnrecoverable() bool { + return !r.Recoverable +} + +// Recoverable is an interface for errors to implement to indicate whether or +// not they are fatal or recoverable. +type Recoverable interface { + error + IsRecoverable() bool +} + +// IsRecoverable returns true if error is a RecoverableError with +// Recoverable=true. Otherwise false is returned. +func IsRecoverable(e error) bool { + if re, ok := e.(Recoverable); ok { + return re.IsRecoverable() + } + return false +} + +// WrappedServerError wraps an error and satisfies +// both the Recoverable and the ServerSideError interfaces +type WrappedServerError struct { + Err error +} + +// NewWrappedServerError is used to create a wrapped server side error +func NewWrappedServerError(e error) error { + return &WrappedServerError{ + Err: e, + } +} + +func (r *WrappedServerError) IsRecoverable() bool { + return IsRecoverable(r.Err) +} + +func (r *WrappedServerError) Error() string { + return r.Err.Error() +} + +func (r *WrappedServerError) IsServerSide() bool { + return true +} + +// ServerSideError is an interface for errors to implement to indicate +// errors occurring after the request makes it to a server +type ServerSideError interface { + error + IsServerSide() bool +} + +// IsServerSide returns true if error is a wrapped +// server side error +func IsServerSide(e error) bool { + if se, ok := e.(ServerSideError); ok { + return se.IsServerSide() + } + return false +} + +// ACLPolicy is used to represent an ACL policy +type ACLPolicy struct { + Name string // Unique name + Description string // Human readable + Rules string // HCL or JSON format + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +// SetHash is used to compute and set the hash of the ACL policy +func (c *ACLPolicy) SetHash() []byte { + // Initialize a 256bit Blake2 hash (32 bytes) + hash, err := blake2b.New256(nil) + if err != nil { + panic(err) + } + + // Write all the user set fields + hash.Write([]byte(c.Name)) + hash.Write([]byte(c.Description)) + hash.Write([]byte(c.Rules)) + + // Finalize the hash + hashVal := hash.Sum(nil) + + // Set and return the hash + c.Hash = hashVal + return hashVal +} + +func (a *ACLPolicy) Stub() *ACLPolicyListStub { + return &ACLPolicyListStub{ + Name: a.Name, + Description: a.Description, + Hash: a.Hash, + CreateIndex: a.CreateIndex, + ModifyIndex: a.ModifyIndex, + } +} + +func (a *ACLPolicy) Validate() error { + var mErr multierror.Error + if !validPolicyName.MatchString(a.Name) { + err := fmt.Errorf("invalid name '%s'", a.Name) + mErr.Errors = append(mErr.Errors, err) + } + if _, err := acl.Parse(a.Rules); err != nil { + err = fmt.Errorf("failed to parse rules: %v", err) + mErr.Errors = append(mErr.Errors, err) 
+ } + if len(a.Description) > maxPolicyDescriptionLength { + err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength) + mErr.Errors = append(mErr.Errors, err) + } + return mErr.ErrorOrNil() +} + +// ACLPolicyListStub is used to for listing ACL policies +type ACLPolicyListStub struct { + Name string + Description string + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +// ACLPolicyListRequest is used to request a list of policies +type ACLPolicyListRequest struct { + QueryOptions +} + +// ACLPolicySpecificRequest is used to query a specific policy +type ACLPolicySpecificRequest struct { + Name string + QueryOptions +} + +// ACLPolicySetRequest is used to query a set of policies +type ACLPolicySetRequest struct { + Names []string + QueryOptions +} + +// ACLPolicyListResponse is used for a list request +type ACLPolicyListResponse struct { + Policies []*ACLPolicyListStub + QueryMeta +} + +// SingleACLPolicyResponse is used to return a single policy +type SingleACLPolicyResponse struct { + Policy *ACLPolicy + QueryMeta +} + +// ACLPolicySetResponse is used to return a set of policies +type ACLPolicySetResponse struct { + Policies map[string]*ACLPolicy + QueryMeta +} + +// ACLPolicyDeleteRequest is used to delete a set of policies +type ACLPolicyDeleteRequest struct { + Names []string + WriteRequest +} + +// ACLPolicyUpsertRequest is used to upsert a set of policies +type ACLPolicyUpsertRequest struct { + Policies []*ACLPolicy + WriteRequest +} + +// ACLToken represents a client token which is used to Authenticate +type ACLToken struct { + AccessorID string // Public Accessor ID (UUID) + SecretID string // Secret ID, private (UUID) + Name string // Human friendly name + Type string // Client or Management + Policies []string // Policies this token ties to + Global bool // Global or Region local + Hash []byte + CreateTime time.Time // Time of creation + CreateIndex uint64 + ModifyIndex uint64 +} + +var ( + // AnonymousACLToken is used no SecretID is provided, and the + // request is made anonymously. 
+ AnonymousACLToken = &ACLToken{ + AccessorID: "anonymous", + Name: "Anonymous Token", + Type: ACLClientToken, + Policies: []string{"anonymous"}, + Global: false, + } +) + +type ACLTokenListStub struct { + AccessorID string + Name string + Type string + Policies []string + Global bool + Hash []byte + CreateTime time.Time + CreateIndex uint64 + ModifyIndex uint64 +} + +// SetHash is used to compute and set the hash of the ACL token +func (a *ACLToken) SetHash() []byte { + // Initialize a 256bit Blake2 hash (32 bytes) + hash, err := blake2b.New256(nil) + if err != nil { + panic(err) + } + + // Write all the user set fields + hash.Write([]byte(a.Name)) + hash.Write([]byte(a.Type)) + for _, policyName := range a.Policies { + hash.Write([]byte(policyName)) + } + if a.Global { + hash.Write([]byte("global")) + } else { + hash.Write([]byte("local")) + } + + // Finalize the hash + hashVal := hash.Sum(nil) + + // Set and return the hash + a.Hash = hashVal + return hashVal +} + +func (a *ACLToken) Stub() *ACLTokenListStub { + return &ACLTokenListStub{ + AccessorID: a.AccessorID, + Name: a.Name, + Type: a.Type, + Policies: a.Policies, + Global: a.Global, + Hash: a.Hash, + CreateTime: a.CreateTime, + CreateIndex: a.CreateIndex, + ModifyIndex: a.ModifyIndex, + } +} + +// Validate is used to sanity check a token +func (a *ACLToken) Validate() error { + var mErr multierror.Error + if len(a.Name) > maxTokenNameLength { + mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long")) + } + switch a.Type { + case ACLClientToken: + if len(a.Policies) == 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies")) + } + case ACLManagementToken: + if len(a.Policies) != 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies")) + } + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management")) + } + return mErr.ErrorOrNil() +} + +// PolicySubset checks if a given set of policies is a subset of the token +func (a *ACLToken) PolicySubset(policies []string) bool { + // Hot-path the management tokens, superset of all policies. 
+ if a.Type == ACLManagementToken { + return true + } + associatedPolicies := make(map[string]struct{}, len(a.Policies)) + for _, policy := range a.Policies { + associatedPolicies[policy] = struct{}{} + } + for _, policy := range policies { + if _, ok := associatedPolicies[policy]; !ok { + return false + } + } + return true +} + +// ACLTokenListRequest is used to request a list of tokens +type ACLTokenListRequest struct { + GlobalOnly bool + QueryOptions +} + +// ACLTokenSpecificRequest is used to query a specific token +type ACLTokenSpecificRequest struct { + AccessorID string + QueryOptions +} + +// ACLTokenSetRequest is used to query a set of tokens +type ACLTokenSetRequest struct { + AccessorIDS []string + QueryOptions +} + +// ACLTokenListResponse is used for a list request +type ACLTokenListResponse struct { + Tokens []*ACLTokenListStub + QueryMeta +} + +// SingleACLTokenResponse is used to return a single token +type SingleACLTokenResponse struct { + Token *ACLToken + QueryMeta +} + +// ACLTokenSetResponse is used to return a set of token +type ACLTokenSetResponse struct { + Tokens map[string]*ACLToken // Keyed by Accessor ID + QueryMeta +} + +// ResolveACLTokenRequest is used to resolve a specific token +type ResolveACLTokenRequest struct { + SecretID string + QueryOptions +} + +// ResolveACLTokenResponse is used to resolve a single token +type ResolveACLTokenResponse struct { + Token *ACLToken + QueryMeta +} + +// ACLTokenDeleteRequest is used to delete a set of tokens +type ACLTokenDeleteRequest struct { + AccessorIDs []string + WriteRequest +} + +// ACLTokenBootstrapRequest is used to bootstrap ACLs +type ACLTokenBootstrapRequest struct { + Token *ACLToken // Not client specifiable + ResetIndex uint64 // Reset index is used to clear the bootstrap token + WriteRequest +} + +// ACLTokenUpsertRequest is used to upsert a set of tokens +type ACLTokenUpsertRequest struct { + Tokens []*ACLToken + WriteRequest +} + +// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest +type ACLTokenUpsertResponse struct { + Tokens []*ACLToken + WriteMeta +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go new file mode 100644 index 0000000..f883d93 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go @@ -0,0 +1,3 @@ +package structs + +//go:generate ./generate.sh diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/testing.go b/vendor/github.com/hashicorp/nomad/nomad/structs/testing.go new file mode 100644 index 0000000..7f44353 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/testing.go @@ -0,0 +1,271 @@ +package structs + +import ( + "fmt" + "time" + + "github.com/hashicorp/nomad/helper/uuid" + psstructs "github.com/hashicorp/nomad/plugins/shared/structs" +) + +// NodeResourcesToAllocatedResources converts a node resources to an allocated +// resources. The task name used is "web" and network is omitted. This is +// useful when trying to make an allocation fill an entire node. 
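+// Illustrative usage in a test (a sketch, not part of the upstream source):
+//
+//	node := MockNode()
+//	alloc := MockAlloc()
+//	alloc.NodeID = node.ID
+//	alloc.AllocatedResources = NodeResourcesToAllocatedResources(node.NodeResources)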
+func NodeResourcesToAllocatedResources(n *NodeResources) *AllocatedResources { + if n == nil { + return nil + } + + return &AllocatedResources{ + Tasks: map[string]*AllocatedTaskResources{ + "web": { + Cpu: AllocatedCpuResources{ + CpuShares: n.Cpu.CpuShares, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: n.Memory.MemoryMB, + }, + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: n.Disk.DiskMB, + }, + } +} + +func MockNode() *Node { + node := &Node{ + ID: uuid.Generate(), + SecretID: uuid.Generate(), + Datacenter: "dc1", + Name: "foobar", + Attributes: map[string]string{ + "kernel.name": "linux", + "arch": "x86", + "nomad.version": "0.5.0", + "driver.exec": "1", + "driver.mock_driver": "1", + }, + NodeResources: &NodeResources{ + Cpu: NodeCpuResources{ + CpuShares: 4000, + }, + Memory: NodeMemoryResources{ + MemoryMB: 8192, + }, + Disk: NodeDiskResources{ + DiskMB: 100 * 1024, + }, + Networks: []*NetworkResource{ + { + Device: "eth0", + CIDR: "192.168.0.100/32", + MBits: 1000, + }, + }, + }, + ReservedResources: &NodeReservedResources{ + Cpu: NodeReservedCpuResources{ + CpuShares: 100, + }, + Memory: NodeReservedMemoryResources{ + MemoryMB: 256, + }, + Disk: NodeReservedDiskResources{ + DiskMB: 4 * 1024, + }, + Networks: NodeReservedNetworkResources{ + ReservedHostPorts: "22", + }, + }, + Links: map[string]string{ + "consul": "foobar.dc1", + }, + Meta: map[string]string{ + "pci-dss": "true", + "database": "mysql", + "version": "5.6", + }, + NodeClass: "linux-medium-pci", + Status: NodeStatusReady, + SchedulingEligibility: NodeSchedulingEligible, + } + node.ComputeClass() + return node +} + +// NvidiaNode returns a node with two instances of an Nvidia GPU +func MockNvidiaNode() *Node { + n := MockNode() + n.NodeResources.Devices = []*NodeDeviceResource{ + { + Type: "gpu", + Vendor: "nvidia", + Name: "1080ti", + Attributes: map[string]*psstructs.Attribute{ + "memory": psstructs.NewIntAttribute(11, psstructs.UnitGiB), + "cuda_cores": psstructs.NewIntAttribute(3584, ""), + "graphics_clock": psstructs.NewIntAttribute(1480, psstructs.UnitMHz), + "memory_bandwidth": psstructs.NewIntAttribute(11, psstructs.UnitGBPerS), + }, + Instances: []*NodeDevice{ + { + ID: uuid.Generate(), + Healthy: true, + }, + { + ID: uuid.Generate(), + Healthy: true, + }, + }, + }, + } + n.ComputeClass() + return n +} + +func MockJob() *Job { + job := &Job{ + Region: "global", + ID: fmt.Sprintf("mock-service-%s", uuid.Generate()), + Name: "my-job", + Namespace: DefaultNamespace, + Type: JobTypeService, + Priority: 50, + AllAtOnce: false, + Datacenters: []string{"dc1"}, + Constraints: []*Constraint{ + { + LTarget: "${attr.kernel.name}", + RTarget: "linux", + Operand: "=", + }, + }, + TaskGroups: []*TaskGroup{ + { + Name: "web", + Count: 10, + EphemeralDisk: &EphemeralDisk{ + SizeMB: 150, + }, + RestartPolicy: &RestartPolicy{ + Attempts: 3, + Interval: 10 * time.Minute, + Delay: 1 * time.Minute, + Mode: RestartPolicyModeDelay, + }, + ReschedulePolicy: &ReschedulePolicy{ + Attempts: 2, + Interval: 10 * time.Minute, + Delay: 5 * time.Second, + DelayFunction: "constant", + }, + Migrate: DefaultMigrateStrategy(), + Tasks: []*Task{ + { + Name: "web", + Driver: "exec", + Config: map[string]interface{}{ + "command": "/bin/date", + }, + Env: map[string]string{ + "FOO": "bar", + }, + Services: []*Service{ + { + Name: "${TASK}-frontend", + PortLabel: "http", + Tags: []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"}, + Checks: []*ServiceCheck{ + { + Name: "check-table", + Type: ServiceCheckScript, + Command: 
"/usr/local/check-table-${meta.database}", + Args: []string{"${meta.version}"}, + Interval: 30 * time.Second, + Timeout: 5 * time.Second, + }, + }, + }, + { + Name: "${TASK}-admin", + PortLabel: "admin", + }, + }, + LogConfig: DefaultLogConfig(), + Resources: &Resources{ + CPU: 500, + MemoryMB: 256, + Networks: []*NetworkResource{ + { + MBits: 50, + DynamicPorts: []Port{ + {Label: "http"}, + {Label: "admin"}, + }, + }, + }, + }, + Meta: map[string]string{ + "foo": "bar", + }, + }, + }, + Meta: map[string]string{ + "elb_check_type": "http", + "elb_check_interval": "30s", + "elb_check_min": "3", + }, + }, + }, + Meta: map[string]string{ + "owner": "armon", + }, + Status: JobStatusPending, + Version: 0, + CreateIndex: 42, + ModifyIndex: 99, + JobModifyIndex: 99, + } + job.Canonicalize() + return job +} + +func MockAlloc() *Allocation { + alloc := &Allocation{ + ID: uuid.Generate(), + EvalID: uuid.Generate(), + NodeID: "12345678-abcd-efab-cdef-123456789abc", + Namespace: DefaultNamespace, + TaskGroup: "web", + AllocatedResources: &AllocatedResources{ + Tasks: map[string]*AllocatedTaskResources{ + "web": { + Cpu: AllocatedCpuResources{ + CpuShares: 500, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: 256, + }, + Networks: []*NetworkResource{ + { + Device: "eth0", + IP: "192.168.0.100", + ReservedPorts: []Port{{Label: "admin", Value: 5000}}, + MBits: 50, + DynamicPorts: []Port{{Label: "http", Value: 9876}}, + }, + }, + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: 150, + }, + }, + Job: MockJob(), + DesiredStatus: AllocDesiredStatusRun, + ClientStatus: AllocClientStatusPending, + } + alloc.JobID = alloc.Job.ID + return alloc +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/base/base.go b/vendor/github.com/hashicorp/nomad/plugins/base/base.go new file mode 100644 index 0000000..f0939da --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/base/base.go @@ -0,0 +1,98 @@ +package base + +import ( + "github.com/hashicorp/nomad/plugins/base/proto" + "github.com/hashicorp/nomad/plugins/shared/hclspec" +) + +// BasePlugin is the interface that all Nomad plugins must support. +type BasePlugin interface { + // PluginInfo describes the type and version of a plugin. + PluginInfo() (*PluginInfoResponse, error) + + // ConfigSchema returns the schema for parsing the plugins configuration. + ConfigSchema() (*hclspec.Spec, error) + + // SetConfig is used to set the configuration by passing a MessagePack + // encoding of it. + SetConfig(c *Config) error +} + +// PluginInfoResponse returns basic information about the plugin such that Nomad +// can decide whether to load the plugin or not. +type PluginInfoResponse struct { + // Type returns the plugins type + Type string + + // PluginApiVersions returns the versions of the Nomad plugin API that the + // plugin supports. + PluginApiVersions []string + + // PluginVersion is the version of the plugin. + PluginVersion string + + // Name is the plugins name. + Name string +} + +// Config contains the configuration for the plugin. +type Config struct { + // ApiVersion is the negotiated plugin API version to use. + ApiVersion string + + // PluginConfig is the MessagePack encoding of the plugins user + // configuration. 
+ PluginConfig []byte + + // AgentConfig is the Nomad agents configuration as applicable to plugins + AgentConfig *AgentConfig +} + +// AgentConfig is the Nomad agent's configuration sent to all plugins +type AgentConfig struct { + Driver *ClientDriverConfig +} + +// ClientDriverConfig is the driver specific configuration for all driver plugins +type ClientDriverConfig struct { + // ClientMaxPort is the upper range of the ports that the client uses for + // communicating with plugin subsystems over loopback + ClientMaxPort uint + + // ClientMinPort is the lower range of the ports that the client uses for + // communicating with plugin subsystems over loopback + ClientMinPort uint +} + +func (c *AgentConfig) toProto() *proto.NomadConfig { + if c == nil { + return nil + } + + cfg := &proto.NomadConfig{} + if c.Driver != nil { + + cfg.Driver = &proto.NomadDriverConfig{ + ClientMaxPort: uint32(c.Driver.ClientMaxPort), + ClientMinPort: uint32(c.Driver.ClientMinPort), + } + } + + return cfg +} + +func nomadConfigFromProto(pb *proto.NomadConfig) *AgentConfig { + if pb == nil { + return nil + } + + cfg := &AgentConfig{} + if pb.Driver != nil { + cfg.Driver = &ClientDriverConfig{ + ClientMaxPort: uint(pb.Driver.ClientMaxPort), + ClientMinPort: uint(pb.Driver.ClientMinPort), + } + } + + return cfg +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/base/client.go b/vendor/github.com/hashicorp/nomad/plugins/base/client.go new file mode 100644 index 0000000..d7c4e94 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/base/client.go @@ -0,0 +1,65 @@ +package base + +import ( + "context" + "fmt" + + "github.com/hashicorp/nomad/helper/pluginutils/grpcutils" + "github.com/hashicorp/nomad/plugins/base/proto" + "github.com/hashicorp/nomad/plugins/shared/hclspec" +) + +// BasePluginClient implements the client side of a remote base plugin, using +// gRPC to communicate to the remote plugin. 
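To make the BasePlugin contract above concrete, here is a minimal sketch of how a task driver built on this skeleton might answer PluginInfo. The package name, driver name, and version strings (hello, hello-driver, 0.0.1, API 0.1.0) are illustrative placeholders, not values defined in this commit.

// Hypothetical sketch: a driver answering PluginInfo with a static
// description. All names and version strings are placeholders.
package hello

import "github.com/hashicorp/nomad/plugins/base"

// HelloDriverPlugin is a stub driver type; only the PluginInfo method of the
// BasePlugin interface is shown here.
type HelloDriverPlugin struct{}

// pluginInfo describes this illustrative driver to Nomad.
var pluginInfo = &base.PluginInfoResponse{
	Type:              base.PluginTypeDriver,
	PluginApiVersions: []string{"0.1.0"},
	PluginVersion:     "0.0.1",
	Name:              "hello-driver",
}

// PluginInfo returns the static description above.
func (d *HelloDriverPlugin) PluginInfo() (*base.PluginInfoResponse, error) {
	return pluginInfo, nil
}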
+type BasePluginClient struct { + Client proto.BasePluginClient + + // DoneCtx is closed when the plugin exits + DoneCtx context.Context +} + +func (b *BasePluginClient) PluginInfo() (*PluginInfoResponse, error) { + presp, err := b.Client.PluginInfo(b.DoneCtx, &proto.PluginInfoRequest{}) + if err != nil { + return nil, grpcutils.HandleGrpcErr(err, b.DoneCtx) + } + + var ptype string + switch presp.GetType() { + case proto.PluginType_DRIVER: + ptype = PluginTypeDriver + case proto.PluginType_DEVICE: + ptype = PluginTypeDevice + default: + return nil, fmt.Errorf("plugin is of unknown type: %q", presp.GetType().String()) + } + + resp := &PluginInfoResponse{ + Type: ptype, + PluginApiVersions: presp.GetPluginApiVersions(), + PluginVersion: presp.GetPluginVersion(), + Name: presp.GetName(), + } + + return resp, nil +} + +func (b *BasePluginClient) ConfigSchema() (*hclspec.Spec, error) { + presp, err := b.Client.ConfigSchema(b.DoneCtx, &proto.ConfigSchemaRequest{}) + if err != nil { + return nil, grpcutils.HandleGrpcErr(err, b.DoneCtx) + } + + return presp.GetSpec(), nil +} + +func (b *BasePluginClient) SetConfig(c *Config) error { + // Send the config + _, err := b.Client.SetConfig(b.DoneCtx, &proto.SetConfigRequest{ + MsgpackConfig: c.PluginConfig, + NomadConfig: c.AgentConfig.toProto(), + PluginApiVersion: c.ApiVersion, + }) + + return grpcutils.HandleGrpcErr(err, b.DoneCtx) +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/base/plugin.go b/vendor/github.com/hashicorp/nomad/plugins/base/plugin.go new file mode 100644 index 0000000..be41451 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/base/plugin.go @@ -0,0 +1,75 @@ +package base + +import ( + "bytes" + "context" + "reflect" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugins/base/proto" + "github.com/ugorji/go/codec" + "google.golang.org/grpc" +) + +const ( + // PluginTypeBase implements the base plugin interface + PluginTypeBase = "base" + + // PluginTypeDriver implements the driver plugin interface + PluginTypeDriver = "driver" + + // PluginTypeDevice implements the device plugin interface + PluginTypeDevice = "device" +) + +var ( + // Handshake is a common handshake that is shared by all plugins and Nomad. + Handshake = plugin.HandshakeConfig{ + // ProtocolVersion for the executor protocol. + // Version 1: pre 0.9 netrpc based executor + // Version 2: 0.9+ grpc based executor + ProtocolVersion: 2, + MagicCookieKey: "NOMAD_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "e4327c2e01eabfd75a8a67adb114fb34a757d57eee7728d857a8cec6e91a7255", + } +) + +// PluginBase is wraps a BasePlugin and implements go-plugins GRPCPlugin +// interface to expose the interface over gRPC. 
+type PluginBase struct { + plugin.NetRPCUnsupportedPlugin + Impl BasePlugin +} + +func (p *PluginBase) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterBasePluginServer(s, &basePluginServer{ + impl: p.Impl, + broker: broker, + }) + return nil +} + +func (p *PluginBase) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &BasePluginClient{ + Client: proto.NewBasePluginClient(c), + DoneCtx: ctx, + }, nil +} + +// MsgpackHandle is a shared handle for encoding/decoding of structs +var MsgpackHandle = func() *codec.MsgpackHandle { + h := &codec.MsgpackHandle{} + h.RawToString = true + h.MapType = reflect.TypeOf(map[string]interface{}(nil)) + return h +}() + +// MsgPackDecode is used to decode a MsgPack encoded object +func MsgPackDecode(buf []byte, out interface{}) error { + return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out) +} + +// MsgPackEncode is used to encode an object to MsgPack +func MsgPackEncode(b *[]byte, in interface{}) error { + return codec.NewEncoderBytes(b, MsgpackHandle).Encode(in) +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/base/proto/base.pb.go b/vendor/github.com/hashicorp/nomad/plugins/base/proto/base.pb.go new file mode 100644 index 0000000..4be310b --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/base/proto/base.pb.go @@ -0,0 +1,601 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/base/proto/base.proto + +package proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import hclspec "github.com/hashicorp/nomad/plugins/shared/hclspec" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// PluginType enumerates the type of plugins Nomad supports +type PluginType int32 + +const ( + PluginType_UNKNOWN PluginType = 0 + PluginType_DRIVER PluginType = 2 + PluginType_DEVICE PluginType = 3 +) + +var PluginType_name = map[int32]string{ + 0: "UNKNOWN", + 2: "DRIVER", + 3: "DEVICE", +} +var PluginType_value = map[string]int32{ + "UNKNOWN": 0, + "DRIVER": 2, + "DEVICE": 3, +} + +func (x PluginType) String() string { + return proto.EnumName(PluginType_name, int32(x)) +} +func (PluginType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{0} +} + +// PluginInfoRequest is used to request the plugins basic information. 
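SetConfig hands a plugin its user configuration as raw MessagePack in Config.PluginConfig, and the MsgPackDecode helper above is the intended way to turn those bytes into a typed value. The HelloConfig type and its greeting field below are invented for illustration, with struct tags modeled on the TestConfig helper vendored later in this commit.

// Hypothetical sketch: decoding the MessagePack-encoded user configuration
// delivered through Config.PluginConfig with the MsgPackDecode helper above.
package hello

import "github.com/hashicorp/nomad/plugins/base"

// HelloConfig is an invented config shape; the tags follow the cty/codec
// pattern used by the vendored TestConfig helper.
type HelloConfig struct {
	Greeting string `cty:"greeting" codec:"greeting"`
}

// parseDriverConfig is what a SetConfig implementation might call to turn the
// raw bytes into a typed value.
func parseDriverConfig(cfg *base.Config) (*HelloConfig, error) {
	var parsed HelloConfig
	if len(cfg.PluginConfig) != 0 {
		if err := base.MsgPackDecode(cfg.PluginConfig, &parsed); err != nil {
			return nil, err
		}
	}
	return &parsed, nil
}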
+type PluginInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginInfoRequest) Reset() { *m = PluginInfoRequest{} } +func (m *PluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*PluginInfoRequest) ProtoMessage() {} +func (*PluginInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{0} +} +func (m *PluginInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginInfoRequest.Unmarshal(m, b) +} +func (m *PluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginInfoRequest.Marshal(b, m, deterministic) +} +func (dst *PluginInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginInfoRequest.Merge(dst, src) +} +func (m *PluginInfoRequest) XXX_Size() int { + return xxx_messageInfo_PluginInfoRequest.Size(m) +} +func (m *PluginInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PluginInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginInfoRequest proto.InternalMessageInfo + +// PluginInfoResponse returns basic information about the plugin such +// that Nomad can decide whether to load the plugin or not. +type PluginInfoResponse struct { + // type indicates what type of plugin this is. + Type PluginType `protobuf:"varint,1,opt,name=type,proto3,enum=hashicorp.nomad.plugins.base.proto.PluginType" json:"type,omitempty"` + // plugin_api_versions indicates the versions of the Nomad Plugin API + // this plugin supports. + PluginApiVersions []string `protobuf:"bytes,2,rep,name=plugin_api_versions,json=pluginApiVersions,proto3" json:"plugin_api_versions,omitempty"` + // plugin_version is the semver version of this individual plugin. + // This is divorce from Nomad’s development and versioning. 
+ PluginVersion string `protobuf:"bytes,3,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` + // name is the name of the plugin + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginInfoResponse) Reset() { *m = PluginInfoResponse{} } +func (m *PluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*PluginInfoResponse) ProtoMessage() {} +func (*PluginInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{1} +} +func (m *PluginInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginInfoResponse.Unmarshal(m, b) +} +func (m *PluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginInfoResponse.Marshal(b, m, deterministic) +} +func (dst *PluginInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginInfoResponse.Merge(dst, src) +} +func (m *PluginInfoResponse) XXX_Size() int { + return xxx_messageInfo_PluginInfoResponse.Size(m) +} +func (m *PluginInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PluginInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginInfoResponse proto.InternalMessageInfo + +func (m *PluginInfoResponse) GetType() PluginType { + if m != nil { + return m.Type + } + return PluginType_UNKNOWN +} + +func (m *PluginInfoResponse) GetPluginApiVersions() []string { + if m != nil { + return m.PluginApiVersions + } + return nil +} + +func (m *PluginInfoResponse) GetPluginVersion() string { + if m != nil { + return m.PluginVersion + } + return "" +} + +func (m *PluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// ConfigSchemaRequest is used to request the configurations schema. +type ConfigSchemaRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSchemaRequest) Reset() { *m = ConfigSchemaRequest{} } +func (m *ConfigSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*ConfigSchemaRequest) ProtoMessage() {} +func (*ConfigSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{2} +} +func (m *ConfigSchemaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSchemaRequest.Unmarshal(m, b) +} +func (m *ConfigSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSchemaRequest.Marshal(b, m, deterministic) +} +func (dst *ConfigSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSchemaRequest.Merge(dst, src) +} +func (m *ConfigSchemaRequest) XXX_Size() int { + return xxx_messageInfo_ConfigSchemaRequest.Size(m) +} +func (m *ConfigSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSchemaRequest proto.InternalMessageInfo + +// ConfigSchemaResponse returns the plugins configuration schema. 
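ConfigSchema is expected to return an hclspec.Spec describing the plugin's HCL configuration block. The sketch below builds a one-attribute schema using the same literal Spec/Spec_Object/Spec_Attr structure as the vendored TestSpec; the greeting attribute is again a placeholder.

// Hypothetical sketch: a ConfigSchema implementation returning a literal
// hclspec.Spec with one optional string attribute, mirroring the structure
// of the vendored TestSpec.
package hello

import "github.com/hashicorp/nomad/plugins/shared/hclspec"

func configSchema() (*hclspec.Spec, error) {
	return &hclspec.Spec{
		Block: &hclspec.Spec_Object{
			Object: &hclspec.Object{
				Attributes: map[string]*hclspec.Spec{
					"greeting": {
						Block: &hclspec.Spec_Attr{
							Attr: &hclspec.Attr{
								Type:     "string",
								Required: false,
							},
						},
					},
				},
			},
		},
	}, nil
}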
+type ConfigSchemaResponse struct { + // spec is the plugins configuration schema + Spec *hclspec.Spec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConfigSchemaResponse) Reset() { *m = ConfigSchemaResponse{} } +func (m *ConfigSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*ConfigSchemaResponse) ProtoMessage() {} +func (*ConfigSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{3} +} +func (m *ConfigSchemaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConfigSchemaResponse.Unmarshal(m, b) +} +func (m *ConfigSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConfigSchemaResponse.Marshal(b, m, deterministic) +} +func (dst *ConfigSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSchemaResponse.Merge(dst, src) +} +func (m *ConfigSchemaResponse) XXX_Size() int { + return xxx_messageInfo_ConfigSchemaResponse.Size(m) +} +func (m *ConfigSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSchemaResponse proto.InternalMessageInfo + +func (m *ConfigSchemaResponse) GetSpec() *hclspec.Spec { + if m != nil { + return m.Spec + } + return nil +} + +// SetConfigRequest is used to set the configuration +type SetConfigRequest struct { + // msgpack_config is the configuration encoded as MessagePack. + MsgpackConfig []byte `protobuf:"bytes,1,opt,name=msgpack_config,json=msgpackConfig,proto3" json:"msgpack_config,omitempty"` + // nomad_config is the nomad client configuration sent to all plugins. + NomadConfig *NomadConfig `protobuf:"bytes,2,opt,name=nomad_config,json=nomadConfig,proto3" json:"nomad_config,omitempty"` + // plugin_api_version is the api version to use. 
+ PluginApiVersion string `protobuf:"bytes,3,opt,name=plugin_api_version,json=pluginApiVersion,proto3" json:"plugin_api_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetConfigRequest) Reset() { *m = SetConfigRequest{} } +func (m *SetConfigRequest) String() string { return proto.CompactTextString(m) } +func (*SetConfigRequest) ProtoMessage() {} +func (*SetConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{4} +} +func (m *SetConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetConfigRequest.Unmarshal(m, b) +} +func (m *SetConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetConfigRequest.Marshal(b, m, deterministic) +} +func (dst *SetConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetConfigRequest.Merge(dst, src) +} +func (m *SetConfigRequest) XXX_Size() int { + return xxx_messageInfo_SetConfigRequest.Size(m) +} +func (m *SetConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetConfigRequest proto.InternalMessageInfo + +func (m *SetConfigRequest) GetMsgpackConfig() []byte { + if m != nil { + return m.MsgpackConfig + } + return nil +} + +func (m *SetConfigRequest) GetNomadConfig() *NomadConfig { + if m != nil { + return m.NomadConfig + } + return nil +} + +func (m *SetConfigRequest) GetPluginApiVersion() string { + if m != nil { + return m.PluginApiVersion + } + return "" +} + +// NomadConfig is the client configuration sent to all plugins +type NomadConfig struct { + // driver specific configuration sent to all plugins + Driver *NomadDriverConfig `protobuf:"bytes,1,opt,name=driver,proto3" json:"driver,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NomadConfig) Reset() { *m = NomadConfig{} } +func (m *NomadConfig) String() string { return proto.CompactTextString(m) } +func (*NomadConfig) ProtoMessage() {} +func (*NomadConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{5} +} +func (m *NomadConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NomadConfig.Unmarshal(m, b) +} +func (m *NomadConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NomadConfig.Marshal(b, m, deterministic) +} +func (dst *NomadConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_NomadConfig.Merge(dst, src) +} +func (m *NomadConfig) XXX_Size() int { + return xxx_messageInfo_NomadConfig.Size(m) +} +func (m *NomadConfig) XXX_DiscardUnknown() { + xxx_messageInfo_NomadConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_NomadConfig proto.InternalMessageInfo + +func (m *NomadConfig) GetDriver() *NomadDriverConfig { + if m != nil { + return m.Driver + } + return nil +} + +// NomadDriverConfig is the driver specific client configuration sent to all +// driver plugins +type NomadDriverConfig struct { + // ClientMaxPort is the upper range of the ports that the client uses for + // communicating with plugin subsystems over loopback + ClientMaxPort uint32 `protobuf:"varint,1,opt,name=ClientMaxPort,proto3" json:"ClientMaxPort,omitempty"` + // ClientMinPort is the lower range of the ports that the client uses for + // communicating with plugin subsystems over loopback + ClientMinPort uint32 `protobuf:"varint,2,opt,name=ClientMinPort,proto3" 
json:"ClientMinPort,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NomadDriverConfig) Reset() { *m = NomadDriverConfig{} } +func (m *NomadDriverConfig) String() string { return proto.CompactTextString(m) } +func (*NomadDriverConfig) ProtoMessage() {} +func (*NomadDriverConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{6} +} +func (m *NomadDriverConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NomadDriverConfig.Unmarshal(m, b) +} +func (m *NomadDriverConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NomadDriverConfig.Marshal(b, m, deterministic) +} +func (dst *NomadDriverConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_NomadDriverConfig.Merge(dst, src) +} +func (m *NomadDriverConfig) XXX_Size() int { + return xxx_messageInfo_NomadDriverConfig.Size(m) +} +func (m *NomadDriverConfig) XXX_DiscardUnknown() { + xxx_messageInfo_NomadDriverConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_NomadDriverConfig proto.InternalMessageInfo + +func (m *NomadDriverConfig) GetClientMaxPort() uint32 { + if m != nil { + return m.ClientMaxPort + } + return 0 +} + +func (m *NomadDriverConfig) GetClientMinPort() uint32 { + if m != nil { + return m.ClientMinPort + } + return 0 +} + +// SetConfigResponse is used to respond to setting the configuration +type SetConfigResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetConfigResponse) Reset() { *m = SetConfigResponse{} } +func (m *SetConfigResponse) String() string { return proto.CompactTextString(m) } +func (*SetConfigResponse) ProtoMessage() {} +func (*SetConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{7} +} +func (m *SetConfigResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetConfigResponse.Unmarshal(m, b) +} +func (m *SetConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetConfigResponse.Marshal(b, m, deterministic) +} +func (dst *SetConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetConfigResponse.Merge(dst, src) +} +func (m *SetConfigResponse) XXX_Size() int { + return xxx_messageInfo_SetConfigResponse.Size(m) +} +func (m *SetConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetConfigResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PluginInfoRequest)(nil), "hashicorp.nomad.plugins.base.proto.PluginInfoRequest") + proto.RegisterType((*PluginInfoResponse)(nil), "hashicorp.nomad.plugins.base.proto.PluginInfoResponse") + proto.RegisterType((*ConfigSchemaRequest)(nil), "hashicorp.nomad.plugins.base.proto.ConfigSchemaRequest") + proto.RegisterType((*ConfigSchemaResponse)(nil), "hashicorp.nomad.plugins.base.proto.ConfigSchemaResponse") + proto.RegisterType((*SetConfigRequest)(nil), "hashicorp.nomad.plugins.base.proto.SetConfigRequest") + proto.RegisterType((*NomadConfig)(nil), "hashicorp.nomad.plugins.base.proto.NomadConfig") + proto.RegisterType((*NomadDriverConfig)(nil), "hashicorp.nomad.plugins.base.proto.NomadDriverConfig") + proto.RegisterType((*SetConfigResponse)(nil), "hashicorp.nomad.plugins.base.proto.SetConfigResponse") + proto.RegisterEnum("hashicorp.nomad.plugins.base.proto.PluginType", PluginType_name, PluginType_value) +} + +// 
Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// BasePluginClient is the client API for BasePlugin service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type BasePluginClient interface { + // PluginInfo describes the type and version of a plugin. + PluginInfo(ctx context.Context, in *PluginInfoRequest, opts ...grpc.CallOption) (*PluginInfoResponse, error) + // ConfigSchema returns the schema for parsing the plugins configuration. + ConfigSchema(ctx context.Context, in *ConfigSchemaRequest, opts ...grpc.CallOption) (*ConfigSchemaResponse, error) + // SetConfig is used to set the configuration. + SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) +} + +type basePluginClient struct { + cc *grpc.ClientConn +} + +func NewBasePluginClient(cc *grpc.ClientConn) BasePluginClient { + return &basePluginClient{cc} +} + +func (c *basePluginClient) PluginInfo(ctx context.Context, in *PluginInfoRequest, opts ...grpc.CallOption) (*PluginInfoResponse, error) { + out := new(PluginInfoResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.proto.BasePlugin/PluginInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *basePluginClient) ConfigSchema(ctx context.Context, in *ConfigSchemaRequest, opts ...grpc.CallOption) (*ConfigSchemaResponse, error) { + out := new(ConfigSchemaResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.proto.BasePlugin/ConfigSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *basePluginClient) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) { + out := new(SetConfigResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.proto.BasePlugin/SetConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BasePluginServer is the server API for BasePlugin service. +type BasePluginServer interface { + // PluginInfo describes the type and version of a plugin. + PluginInfo(context.Context, *PluginInfoRequest) (*PluginInfoResponse, error) + // ConfigSchema returns the schema for parsing the plugins configuration. + ConfigSchema(context.Context, *ConfigSchemaRequest) (*ConfigSchemaResponse, error) + // SetConfig is used to set the configuration. 
+ SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error) +} + +func RegisterBasePluginServer(s *grpc.Server, srv BasePluginServer) { + s.RegisterService(&_BasePlugin_serviceDesc, srv) +} + +func _BasePlugin_PluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BasePluginServer).PluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.base.proto.BasePlugin/PluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BasePluginServer).PluginInfo(ctx, req.(*PluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BasePlugin_ConfigSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BasePluginServer).ConfigSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.base.proto.BasePlugin/ConfigSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BasePluginServer).ConfigSchema(ctx, req.(*ConfigSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BasePlugin_SetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BasePluginServer).SetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.base.proto.BasePlugin/SetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BasePluginServer).SetConfig(ctx, req.(*SetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _BasePlugin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "hashicorp.nomad.plugins.base.proto.BasePlugin", + HandlerType: (*BasePluginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "PluginInfo", + Handler: _BasePlugin_PluginInfo_Handler, + }, + { + MethodName: "ConfigSchema", + Handler: _BasePlugin_ConfigSchema_Handler, + }, + { + MethodName: "SetConfig", + Handler: _BasePlugin_SetConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "plugins/base/proto/base.proto", +} + +func init() { proto.RegisterFile("plugins/base/proto/base.proto", fileDescriptor_base_6a1a5ff99a0b9e5d) } + +var fileDescriptor_base_6a1a5ff99a0b9e5d = []byte{ + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x41, 0x8f, 0x12, 0x4d, + 0x10, 0xdd, 0x01, 0x3e, 0x36, 0x14, 0xb0, 0x81, 0xe6, 0x33, 0x21, 0x24, 0x26, 0x64, 0xa2, 0x09, + 0x31, 0x9b, 0x9e, 0x88, 0xa2, 0x1e, 0x57, 0x58, 0x0e, 0xc4, 0x2c, 0x6e, 0x06, 0x45, 0x63, 0x4c, + 0x48, 0x33, 0xf4, 0x32, 0x1d, 0xa1, 0xbb, 0x9d, 0x1e, 0x36, 0xae, 0x89, 0x27, 0xcf, 0xfe, 0x22, + 0x8f, 0xfe, 0x31, 0x33, 0xdd, 0x0d, 0x0c, 0xbb, 0x1a, 0xe1, 0x34, 0x45, 0xd5, 0x7b, 0xaf, 0xaa, + 0x1e, 0xd5, 0x70, 0x5f, 0x2e, 0x56, 0x73, 0xc6, 0x95, 0x37, 0x25, 0x8a, 0x7a, 0x32, 0x12, 0xb1, + 0xd0, 0x21, 
0xd6, 0x21, 0x72, 0x43, 0xa2, 0x42, 0x16, 0x88, 0x48, 0x62, 0x2e, 0x96, 0x64, 0x86, + 0x2d, 0x1c, 0x6f, 0x31, 0x8d, 0xb3, 0x39, 0x8b, 0xc3, 0xd5, 0x14, 0x07, 0x62, 0xe9, 0x6d, 0xe0, + 0x9e, 0x86, 0x7b, 0x6b, 0x75, 0x15, 0x92, 0x88, 0xce, 0xbc, 0x30, 0x58, 0x28, 0x49, 0x83, 0xe4, + 0x3b, 0x49, 0x02, 0xa3, 0xe0, 0xd6, 0xa0, 0x7a, 0xa9, 0x81, 0x03, 0x7e, 0x25, 0x7c, 0xfa, 0x79, + 0x45, 0x55, 0xec, 0xfe, 0x72, 0x00, 0xa5, 0xb3, 0x4a, 0x0a, 0xae, 0x28, 0xea, 0x42, 0x2e, 0xbe, + 0x91, 0xb4, 0xee, 0x34, 0x9d, 0xd6, 0x49, 0x1b, 0xe3, 0x7f, 0x0f, 0x88, 0x8d, 0xca, 0x9b, 0x1b, + 0x49, 0x7d, 0xcd, 0x45, 0x18, 0x6a, 0x06, 0x36, 0x21, 0x92, 0x4d, 0xae, 0x69, 0xa4, 0x98, 0xe0, + 0xaa, 0x9e, 0x69, 0x66, 0x5b, 0x05, 0xbf, 0x6a, 0x4a, 0x2f, 0x25, 0x1b, 0xdb, 0x02, 0x7a, 0x08, + 0x27, 0x16, 0x6f, 0xb1, 0xf5, 0x6c, 0xd3, 0x69, 0x15, 0xfc, 0xb2, 0xc9, 0x5a, 0x1c, 0x42, 0x90, + 0xe3, 0x64, 0x49, 0xeb, 0x39, 0x5d, 0xd4, 0xb1, 0x7b, 0x0f, 0x6a, 0x3d, 0xc1, 0xaf, 0xd8, 0x7c, + 0x14, 0x84, 0x74, 0x49, 0xd6, 0xcb, 0xbd, 0x87, 0xff, 0x77, 0xd3, 0x76, 0xbb, 0x33, 0xc8, 0x25, + 0xbe, 0xe8, 0xed, 0x8a, 0xed, 0xd3, 0xbf, 0x6e, 0x67, 0xfc, 0xc4, 0xd6, 0x4f, 0x3c, 0x92, 0x34, + 0xf0, 0x35, 0xd3, 0xfd, 0xe9, 0x40, 0x65, 0x44, 0x63, 0xa3, 0x6e, 0xdb, 0x25, 0x0b, 0x2c, 0xd5, + 0x5c, 0x92, 0xe0, 0xd3, 0x24, 0xd0, 0x05, 0xdd, 0xa0, 0xe4, 0x97, 0x6d, 0xd6, 0xa0, 0x91, 0x0f, + 0x25, 0xdd, 0x66, 0x0d, 0xca, 0xe8, 0x29, 0xbc, 0x7d, 0x3c, 0x1e, 0x26, 0x05, 0xdb, 0xb4, 0xc8, + 0xb7, 0x3f, 0xd0, 0x29, 0xa0, 0xbb, 0x5e, 0x5b, 0xff, 0x2a, 0xb7, 0xad, 0x76, 0x3f, 0x42, 0x31, + 0xa5, 0x84, 0x2e, 0x20, 0x3f, 0x8b, 0xd8, 0x35, 0x8d, 0xac, 0x21, 0x9d, 0xbd, 0x47, 0x39, 0xd7, + 0x34, 0x3b, 0x90, 0x15, 0x71, 0x27, 0x50, 0xbd, 0x53, 0x44, 0x0f, 0xa0, 0xdc, 0x5b, 0x30, 0xca, + 0xe3, 0x0b, 0xf2, 0xe5, 0x52, 0x44, 0xb1, 0x6e, 0x55, 0xf6, 0x77, 0x93, 0x29, 0x14, 0xe3, 0x1a, + 0x95, 0xd9, 0x41, 0x99, 0x64, 0x72, 0xc8, 0x29, 0xef, 0xcd, 0x7f, 0xfa, 0xe8, 0x31, 0xc0, 0xf6, + 0x02, 0x51, 0x11, 0x8e, 0xdf, 0x0e, 0x5f, 0x0d, 0x5f, 0xbf, 0x1b, 0x56, 0x8e, 0x10, 0x40, 0xfe, + 0xdc, 0x1f, 0x8c, 0xfb, 0x7e, 0x25, 0xa3, 0xe3, 0xfe, 0x78, 0xd0, 0xeb, 0x57, 0xb2, 0xed, 0x1f, + 0x59, 0x80, 0x2e, 0x51, 0xd4, 0xf0, 0xd0, 0xb7, 0xb5, 0x42, 0xf2, 0x12, 0x50, 0x67, 0xff, 0x9b, + 0x4f, 0xbd, 0xa7, 0xc6, 0xb3, 0x43, 0x69, 0x66, 0x7c, 0xf7, 0x08, 0x7d, 0x77, 0xa0, 0x94, 0xbe, + 0x56, 0xf4, 0x7c, 0x1f, 0xa9, 0x3f, 0x9c, 0x7d, 0xe3, 0xc5, 0xe1, 0xc4, 0xcd, 0x14, 0x5f, 0xa1, + 0xb0, 0xf1, 0x16, 0x3d, 0xdd, 0x47, 0xe8, 0xf6, 0x33, 0x68, 0x74, 0x0e, 0x64, 0xad, 0x7b, 0x77, + 0x8f, 0x3f, 0xfc, 0xa7, 0x8b, 0xd3, 0xbc, 0xfe, 0x3c, 0xf9, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xcc, + 0x26, 0x80, 0xcf, 0x37, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/base/proto/base.proto b/vendor/github.com/hashicorp/nomad/plugins/base/proto/base.proto new file mode 100644 index 0000000..344e938 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/base/proto/base.proto @@ -0,0 +1,88 @@ +syntax = "proto3"; +package hashicorp.nomad.plugins.base.proto; +option go_package = "proto"; + +import "github.com/hashicorp/nomad/plugins/shared/hclspec/hcl_spec.proto"; + +// BasePlugin is the methods that all Nomad plugins must support. +service BasePlugin { + + // PluginInfo describes the type and version of a plugin. + rpc PluginInfo(PluginInfoRequest) returns (PluginInfoResponse) {} + + // ConfigSchema returns the schema for parsing the plugins configuration. 
+ rpc ConfigSchema(ConfigSchemaRequest) returns (ConfigSchemaResponse) {} + + // SetConfig is used to set the configuration. + rpc SetConfig(SetConfigRequest) returns (SetConfigResponse) {} +} + +// PluginType enumerates the type of plugins Nomad supports +enum PluginType { + UNKNOWN = 0; + DRIVER = 2; + DEVICE = 3; +} + +// PluginInfoRequest is used to request the plugins basic information. +message PluginInfoRequest {} + +// PluginInfoResponse returns basic information about the plugin such +// that Nomad can decide whether to load the plugin or not. +message PluginInfoResponse { + // type indicates what type of plugin this is. + PluginType type = 1; + + // plugin_api_versions indicates the versions of the Nomad Plugin API + // this plugin supports. + repeated string plugin_api_versions = 2; + + // plugin_version is the semver version of this individual plugin. + // This is divorce from Nomad’s development and versioning. + string plugin_version = 3; + + // name is the name of the plugin + string name = 4; +} + +// ConfigSchemaRequest is used to request the configurations schema. +message ConfigSchemaRequest {} + +// ConfigSchemaResponse returns the plugins configuration schema. +message ConfigSchemaResponse { + // spec is the plugins configuration schema + hashicorp.nomad.plugins.shared.hclspec.Spec spec = 1; +} + +// SetConfigRequest is used to set the configuration +message SetConfigRequest { + // msgpack_config is the configuration encoded as MessagePack. + bytes msgpack_config = 1; + + // nomad_config is the nomad client configuration sent to all plugins. + NomadConfig nomad_config = 2; + + // plugin_api_version is the api version to use. + string plugin_api_version = 3; +} + +// NomadConfig is the client configuration sent to all plugins +message NomadConfig { + // driver specific configuration sent to all plugins + NomadDriverConfig driver = 1; +} + +// NomadDriverConfig is the driver specific client configuration sent to all +// driver plugins +message NomadDriverConfig { + // ClientMaxPort is the upper range of the ports that the client uses for + // communicating with plugin subsystems over loopback + uint32 ClientMaxPort = 1; + + // ClientMinPort is the lower range of the ports that the client uses for + // communicating with plugin subsystems over loopback + uint32 ClientMinPort = 2; +} + +// SetConfigResponse is used to respond to setting the configuration +message SetConfigResponse {} diff --git a/vendor/github.com/hashicorp/nomad/plugins/base/server.go b/vendor/github.com/hashicorp/nomad/plugins/base/server.go new file mode 100644 index 0000000..af01fae --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/base/server.go @@ -0,0 +1,86 @@ +package base + +import ( + "fmt" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugins/base/proto" + "golang.org/x/net/context" +) + +// basePluginServer wraps a base plugin and exposes it via gRPC. 
+type basePluginServer struct { + broker *plugin.GRPCBroker + impl BasePlugin +} + +func (b *basePluginServer) PluginInfo(context.Context, *proto.PluginInfoRequest) (*proto.PluginInfoResponse, error) { + resp, err := b.impl.PluginInfo() + if err != nil { + return nil, err + } + + var ptype proto.PluginType + switch resp.Type { + case PluginTypeDriver: + ptype = proto.PluginType_DRIVER + case PluginTypeDevice: + ptype = proto.PluginType_DEVICE + default: + return nil, fmt.Errorf("plugin is of unknown type: %q", resp.Type) + } + + presp := &proto.PluginInfoResponse{ + Type: ptype, + PluginApiVersions: resp.PluginApiVersions, + PluginVersion: resp.PluginVersion, + Name: resp.Name, + } + + return presp, nil +} + +func (b *basePluginServer) ConfigSchema(context.Context, *proto.ConfigSchemaRequest) (*proto.ConfigSchemaResponse, error) { + spec, err := b.impl.ConfigSchema() + if err != nil { + return nil, err + } + + presp := &proto.ConfigSchemaResponse{ + Spec: spec, + } + + return presp, nil +} + +func (b *basePluginServer) SetConfig(ctx context.Context, req *proto.SetConfigRequest) (*proto.SetConfigResponse, error) { + info, err := b.impl.PluginInfo() + if err != nil { + return nil, err + } + + // Client configuration is filtered based on plugin type + cfg := nomadConfigFromProto(req.GetNomadConfig()) + filteredCfg := new(AgentConfig) + + if cfg != nil { + switch info.Type { + case PluginTypeDriver: + filteredCfg.Driver = cfg.Driver + } + } + + // Build the config request + c := &Config{ + ApiVersion: req.GetPluginApiVersion(), + PluginConfig: req.GetMsgpackConfig(), + AgentConfig: filteredCfg, + } + + // Set the config + if err := b.impl.SetConfig(c); err != nil { + return nil, fmt.Errorf("SetConfig failed: %v", err) + } + + return &proto.SetConfigResponse{}, nil +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/base/structs/errors.go b/vendor/github.com/hashicorp/nomad/plugins/base/structs/errors.go new file mode 100644 index 0000000..0a5a7a6 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/base/structs/errors.go @@ -0,0 +1,12 @@ +package structs + +import "errors" + +const ( + errPluginShutdown = "plugin is shut down" +) + +var ( + // ErrPluginShutdown is returned when the plugin has shutdown. 
+ ErrPluginShutdown = errors.New(errPluginShutdown) +) diff --git a/vendor/github.com/hashicorp/nomad/plugins/base/testing.go b/vendor/github.com/hashicorp/nomad/plugins/base/testing.go new file mode 100644 index 0000000..f3107a5 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/base/testing.go @@ -0,0 +1,93 @@ +package base + +import ( + "github.com/hashicorp/nomad/plugins/shared/hclspec" +) + +var ( + // TestSpec is an hcl Spec for testing + TestSpec = &hclspec.Spec{ + Block: &hclspec.Spec_Object{ + Object: &hclspec.Object{ + Attributes: map[string]*hclspec.Spec{ + "foo": { + Block: &hclspec.Spec_Attr{ + Attr: &hclspec.Attr{ + Type: "string", + Required: false, + }, + }, + }, + "bar": { + Block: &hclspec.Spec_Attr{ + Attr: &hclspec.Attr{ + Type: "number", + Required: false, + }, + }, + }, + "baz": { + Block: &hclspec.Spec_Attr{ + Attr: &hclspec.Attr{ + Type: "bool", + }, + }, + }, + }, + }, + }, + } +) + +// TestConfig is used to decode a config from the TestSpec +type TestConfig struct { + Foo string `cty:"foo" codec:"foo"` + Bar int64 `cty:"bar" codec:"bar"` + Baz bool `cty:"baz" codec:"baz"` +} + +type PluginInfoFn func() (*PluginInfoResponse, error) +type ConfigSchemaFn func() (*hclspec.Spec, error) +type SetConfigFn func(*Config) error + +// MockPlugin is used for testing. +// Each function can be set as a closure to make assertions about how data +// is passed through the base plugin layer. +type MockPlugin struct { + PluginInfoF PluginInfoFn + ConfigSchemaF ConfigSchemaFn + SetConfigF SetConfigFn +} + +func (p *MockPlugin) PluginInfo() (*PluginInfoResponse, error) { return p.PluginInfoF() } +func (p *MockPlugin) ConfigSchema() (*hclspec.Spec, error) { return p.ConfigSchemaF() } +func (p *MockPlugin) SetConfig(cfg *Config) error { + return p.SetConfigF(cfg) +} + +// Below are static implementations of the base plugin functions + +// StaticInfo returns the passed PluginInfoResponse with no error +func StaticInfo(out *PluginInfoResponse) PluginInfoFn { + return func() (*PluginInfoResponse, error) { + return out, nil + } +} + +// StaticConfigSchema returns the passed Spec with no error +func StaticConfigSchema(out *hclspec.Spec) ConfigSchemaFn { + return func() (*hclspec.Spec, error) { + return out, nil + } +} + +// TestConfigSchema returns a ConfigSchemaFn that statically returns the +// TestSpec +func TestConfigSchema() ConfigSchemaFn { + return StaticConfigSchema(TestSpec) +} + +// NoopSetConfig is a noop implementation of set config +func NoopSetConfig() SetConfigFn { + return func(_ *Config) error { return nil } +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/client.go b/vendor/github.com/hashicorp/nomad/plugins/device/client.go new file mode 100644 index 0000000..1538b2b --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/client.go @@ -0,0 +1,150 @@ +package device + +import ( + "context" + "io" + "time" + + "github.com/LK4D4/joincontext" + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/nomad/helper/pluginutils/grpcutils" + "github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/device/proto" +) + +// devicePluginClient implements the client side of a remote device plugin, using +// gRPC to communicate to the remote plugin. +type devicePluginClient struct { + // basePluginClient is embedded to give access to the base plugin methods. 
+ *base.BasePluginClient + + client proto.DevicePluginClient + + // doneCtx is closed when the plugin exits + doneCtx context.Context +} + +// Fingerprint is used to retrieve the set of devices and their health from the +// device plugin. An error may be immediately returned if the fingerprint call +// could not be made or as part of the streaming response. If the context is +// cancelled, the error will be propagated. +func (d *devicePluginClient) Fingerprint(ctx context.Context) (<-chan *FingerprintResponse, error) { + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + var req proto.FingerprintRequest + stream, err := d.client.Fingerprint(joinedCtx, &req) + if err != nil { + return nil, grpcutils.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } + + out := make(chan *FingerprintResponse, 1) + go d.handleFingerprint(ctx, stream, out) + return out, nil +} + +// handleFingerprint should be launched in a goroutine and handles converting +// the gRPC stream to a channel. Exits either when context is cancelled or the +// stream has an error. +func (d *devicePluginClient) handleFingerprint( + reqCtx context.Context, + stream proto.DevicePlugin_FingerprintClient, + out chan *FingerprintResponse) { + + defer close(out) + for { + resp, err := stream.Recv() + if err != nil { + if err != io.EOF { + out <- &FingerprintResponse{ + Error: grpcutils.HandleReqCtxGrpcErr(err, reqCtx, d.doneCtx), + } + } + + // End the stream + return + } + + // Send the response + f := &FingerprintResponse{ + Devices: convertProtoDeviceGroups(resp.GetDeviceGroup()), + } + select { + case <-reqCtx.Done(): + return + case out <- f: + } + } +} + +func (d *devicePluginClient) Reserve(deviceIDs []string) (*ContainerReservation, error) { + // Build the request + req := &proto.ReserveRequest{ + DeviceIds: deviceIDs, + } + + // Make the request + resp, err := d.client.Reserve(d.doneCtx, req) + if err != nil { + return nil, grpcutils.HandleGrpcErr(err, d.doneCtx) + } + + // Convert the response + out := convertProtoContainerReservation(resp.GetContainerRes()) + return out, nil +} + +// Stats is used to retrieve device statistics from the device plugin. An error +// may be immediately returned if the stats call could not be made or as part of +// the streaming response. If the context is cancelled, the error will be +// propagated. +func (d *devicePluginClient) Stats(ctx context.Context, interval time.Duration) (<-chan *StatsResponse, error) { + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + req := proto.StatsRequest{ + CollectionInterval: ptypes.DurationProto(interval), + } + stream, err := d.client.Stats(joinedCtx, &req) + if err != nil { + return nil, grpcutils.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } + + out := make(chan *StatsResponse, 1) + go d.handleStats(ctx, stream, out) + return out, nil +} + +// handleStats should be launched in a goroutine and handles converting +// the gRPC stream to a channel. Exits either when context is cancelled or the +// stream has an error. 
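Both Fingerprint and Stats surface their gRPC streams as Go channels that close when the stream ends or the context is cancelled. As a rough consumer-side sketch, code holding a DevicePlugin (defined in device.go below) could drain the fingerprint channel like this; the watchDevices helper and its print format are illustrative.

// Hypothetical sketch: draining the fingerprint channel exposed by a
// DevicePlugin. The dev handle and ctx are assumed to be supplied by the
// caller.
package hello

import (
	"context"
	"fmt"

	"github.com/hashicorp/nomad/plugins/device"
)

func watchDevices(ctx context.Context, dev device.DevicePlugin) error {
	ch, err := dev.Fingerprint(ctx)
	if err != nil {
		return err
	}

	// The channel is closed when the stream ends or ctx is cancelled.
	for resp := range ch {
		if resp.Error != nil {
			return resp.Error
		}
		for _, group := range resp.Devices {
			fmt.Printf("detected %s/%s/%s with %d instances\n",
				group.Vendor, group.Type, group.Name, len(group.Devices))
		}
	}
	return nil
}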
+func (d *devicePluginClient) handleStats( + reqCtx context.Context, + stream proto.DevicePlugin_StatsClient, + out chan *StatsResponse) { + + defer close(out) + for { + resp, err := stream.Recv() + if err != nil { + if err != io.EOF { + out <- &StatsResponse{ + Error: grpcutils.HandleReqCtxGrpcErr(err, reqCtx, d.doneCtx), + } + } + + // End the stream + return + } + + // Send the response + s := &StatsResponse{ + Groups: convertProtoDeviceGroupsStats(resp.GetGroups()), + } + select { + case <-reqCtx.Done(): + return + case out <- s: + } + } +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/device.go b/vendor/github.com/hashicorp/nomad/plugins/device/device.go new file mode 100644 index 0000000..c1e8b0b --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/device.go @@ -0,0 +1,221 @@ +package device + +import ( + "context" + "fmt" + "time" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/shared/structs" +) + +const ( + // DeviceTypeGPU is a canonical device type for a GPU. + DeviceTypeGPU = "gpu" +) + +// DevicePlugin is the interface for a plugin that can expose detected devices +// to Nomad and inform it how to mount them. +type DevicePlugin interface { + base.BasePlugin + + // Fingerprint returns a stream of devices that are detected. + Fingerprint(ctx context.Context) (<-chan *FingerprintResponse, error) + + // Reserve is used to reserve a set of devices and retrieve mount + // instructions. + Reserve(deviceIDs []string) (*ContainerReservation, error) + + // Stats returns a stream of statistics per device collected at the passed + // interval. + Stats(ctx context.Context, interval time.Duration) (<-chan *StatsResponse, error) +} + +// FingerprintResponse includes a set of detected devices or an error in the +// process of fingerprinting. +type FingerprintResponse struct { + // Devices is a set of devices that have been detected. + Devices []*DeviceGroup + + // Error is populated when fingerprinting has failed. + Error error +} + +// NewFingerprint takes a set of device groups and returns a fingerprint +// response +func NewFingerprint(devices ...*DeviceGroup) *FingerprintResponse { + return &FingerprintResponse{ + Devices: devices, + } +} + +// NewFingerprintError takes an error and returns a fingerprint response +func NewFingerprintError(err error) *FingerprintResponse { + return &FingerprintResponse{ + Error: err, + } +} + +// DeviceGroup is a grouping of devices that share a common vendor, device type +// and name. +type DeviceGroup struct { + // Vendor is the vendor providing the device (nvidia, intel, etc). + Vendor string + + // Type is the type of the device (gpu, fpga, etc). + Type string + + // Name is the devices model name. + Name string + + // Devices is the set of device instances. + Devices []*Device + + // Attributes are a set of attributes shared for all the devices. 
+ Attributes map[string]*structs.Attribute +} + +// Validate validates that the device group is valid +func (d *DeviceGroup) Validate() error { + var mErr multierror.Error + + if d.Vendor == "" { + multierror.Append(&mErr, fmt.Errorf("device vendor must be specified")) + } + if d.Type == "" { + multierror.Append(&mErr, fmt.Errorf("device type must be specified")) + } + if d.Name == "" { + multierror.Append(&mErr, fmt.Errorf("device name must be specified")) + } + + for i, dev := range d.Devices { + if dev == nil { + multierror.Append(&mErr, fmt.Errorf("device %d is nil", i)) + continue + } + + if err := dev.Validate(); err != nil { + multierror.Append(&mErr, multierror.Prefix(err, fmt.Sprintf("device %d: ", i))) + } + } + + for k, v := range d.Attributes { + if err := v.Validate(); err != nil { + multierror.Append(&mErr, fmt.Errorf("device attribute %q invalid: %v", k, err)) + } + } + + return mErr.ErrorOrNil() + +} + +// Device is an instance of a particular device. +type Device struct { + // ID is the identifier for the device. + ID string + + // Healthy marks whether the device is healthy and can be used for + // scheduling. + Healthy bool + + // HealthDesc describes why the device may be unhealthy. + HealthDesc string + + // HwLocality captures hardware locality information for the device. + HwLocality *DeviceLocality +} + +// Validate validates that the device is valid +func (d *Device) Validate() error { + if d.ID == "" { + return fmt.Errorf("device ID must be specified") + } + + return nil +} + +// DeviceLocality captures hardware locality information for a device. +type DeviceLocality struct { + // PciBusID is the PCI bus ID of the device. + PciBusID string +} + +// ContainerReservation describes how to mount a device into a container. A +// container is an isolated environment that shares the host's OS. +type ContainerReservation struct { + // Envs are a set of environment variables to set for the task. + Envs map[string]string + + // Mounts are used to mount host volumes into a container that may include + // libraries, etc. + Mounts []*Mount + + // Devices are the set of devices to mount into the container. + Devices []*DeviceSpec +} + +// Mount is used to mount a host directory into a container. +type Mount struct { + // TaskPath is the location in the task's file system to mount. + TaskPath string + + // HostPath is the host directory path to mount. + HostPath string + + // ReadOnly defines whether the mount should be read only to the task. + ReadOnly bool +} + +// DeviceSpec captures how to mount a device into a container. +type DeviceSpec struct { + // TaskPath is the location to mount the device in the task's file system. + TaskPath string + + // HostPath is the host location of the device. + HostPath string + + // CgroupPerms defines the permissions to use when mounting the device. + CgroupPerms string +} + +// StatsResponse returns statistics for each device group. +type StatsResponse struct { + // Groups contains statistics for each device group. + Groups []*DeviceGroupStats + + // Error is populated when collecting statistics has failed. + Error error +} + +// NewStatsError takes an error and returns a stats response +func NewStatsError(err error) *StatsResponse { + return &StatsResponse{ + Error: err, + } +} + +// DeviceGroupStats contains statistics for each device of a particular +// device group, identified by the vendor, type and name of the device. 
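On the implementation side, a device plugin assembles DeviceGroup values and wraps them with NewFingerprint. The sketch below emits a single static group; the acme vendor, model100 name, device ID, and 8 GiB memory attribute are invented for illustration.

// Hypothetical sketch: a fingerprint loop emitting one static DeviceGroup via
// NewFingerprint. Identifiers and the memory attribute are placeholders.
package hello

import (
	"context"

	"github.com/hashicorp/nomad/plugins/device"
	"github.com/hashicorp/nomad/plugins/shared/structs"
)

func fingerprint(ctx context.Context) (<-chan *device.FingerprintResponse, error) {
	out := make(chan *device.FingerprintResponse, 1)

	group := &device.DeviceGroup{
		Vendor: "acme",
		Type:   device.DeviceTypeGPU,
		Name:   "model100",
		Devices: []*device.Device{
			{ID: "device-0", Healthy: true},
		},
		Attributes: map[string]*structs.Attribute{
			"memory": structs.NewIntAttribute(8, structs.UnitGiB),
		},
	}

	// A real plugin keeps fingerprinting until ctx is cancelled; this sketch
	// sends a single response and then waits for cancellation.
	go func() {
		defer close(out)
		out <- device.NewFingerprint(group)
		<-ctx.Done()
	}()

	return out, nil
}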
+type DeviceGroupStats struct { + Vendor string + Type string + Name string + + // InstanceStats is a mapping of each device ID to its statistics. + InstanceStats map[string]*DeviceStats +} + +// DeviceStats is the statistics for an individual device +type DeviceStats struct { + // Summary exposes a single summary metric that should be the most + // informative to users. + Summary *structs.StatValue + + // Stats contains the verbose statistics for the device. + Stats *structs.StatObject + + // Timestamp is the time the statistics were collected. + Timestamp time.Time +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/mock.go b/vendor/github.com/hashicorp/nomad/plugins/device/mock.go new file mode 100644 index 0000000..5ac1b46 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/mock.go @@ -0,0 +1,108 @@ +package device + +import ( + "context" + "time" + + "github.com/hashicorp/nomad/plugins/base" +) + +type FingerprintFn func(context.Context) (<-chan *FingerprintResponse, error) +type ReserveFn func([]string) (*ContainerReservation, error) +type StatsFn func(context.Context, time.Duration) (<-chan *StatsResponse, error) + +// MockDevicePlugin is used for testing. +// Each function can be set as a closure to make assertions about how data +// is passed through the base plugin layer. +type MockDevicePlugin struct { + *base.MockPlugin + FingerprintF FingerprintFn + ReserveF ReserveFn + StatsF StatsFn +} + +func (p *MockDevicePlugin) Fingerprint(ctx context.Context) (<-chan *FingerprintResponse, error) { + return p.FingerprintF(ctx) +} + +func (p *MockDevicePlugin) Reserve(devices []string) (*ContainerReservation, error) { + return p.ReserveF(devices) +} + +func (p *MockDevicePlugin) Stats(ctx context.Context, interval time.Duration) (<-chan *StatsResponse, error) { + return p.StatsF(ctx, interval) +} + +// Below are static implementations of the device functions + +// StaticFingerprinter fingerprints the passed devices just once +func StaticFingerprinter(devices []*DeviceGroup) FingerprintFn { + return func(_ context.Context) (<-chan *FingerprintResponse, error) { + outCh := make(chan *FingerprintResponse, 1) + outCh <- &FingerprintResponse{ + Devices: devices, + } + return outCh, nil + } +} + +// ErrorChFingerprinter returns an error fingerprinting over the channel +func ErrorChFingerprinter(err error) FingerprintFn { + return func(_ context.Context) (<-chan *FingerprintResponse, error) { + outCh := make(chan *FingerprintResponse, 1) + outCh <- &FingerprintResponse{ + Error: err, + } + return outCh, nil + } +} + +// StaticReserve returns the passed container reservation +func StaticReserve(out *ContainerReservation) ReserveFn { + return func(_ []string) (*ContainerReservation, error) { + return out, nil + } +} + +// ErrorReserve returns the passed error +func ErrorReserve(err error) ReserveFn { + return func(_ []string) (*ContainerReservation, error) { + return nil, err + } +} + +// StaticStats returns the passed statistics +func StaticStats(out []*DeviceGroupStats) StatsFn { + return func(ctx context.Context, intv time.Duration) (<-chan *StatsResponse, error) { + outCh := make(chan *StatsResponse, 1) + + go func() { + ticker := time.NewTimer(0) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ticker.Reset(intv) + } + + outCh <- &StatsResponse{ + Groups: out, + } + } + }() + + return outCh, nil + } +} + +// ErrorChStats returns an error collecting stats over the channel +func ErrorChStats(err error) StatsFn { + return func(_ context.Context, 
_ time.Duration) (<-chan *StatsResponse, error) { + outCh := make(chan *StatsResponse, 1) + outCh <- &StatsResponse{ + Error: err, + } + return outCh, nil + } +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/plugin.go b/vendor/github.com/hashicorp/nomad/plugins/device/plugin.go new file mode 100644 index 0000000..65ec195 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/plugin.go @@ -0,0 +1,51 @@ +package device + +import ( + "context" + + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugins/base" + bproto "github.com/hashicorp/nomad/plugins/base/proto" + "github.com/hashicorp/nomad/plugins/device/proto" + "google.golang.org/grpc" +) + +// PluginDevice is wraps a DevicePlugin and implements go-plugins GRPCPlugin +// interface to expose the interface over gRPC. +type PluginDevice struct { + plugin.NetRPCUnsupportedPlugin + Impl DevicePlugin +} + +func (p *PluginDevice) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterDevicePluginServer(s, &devicePluginServer{ + impl: p.Impl, + broker: broker, + }) + return nil +} + +func (p *PluginDevice) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &devicePluginClient{ + doneCtx: ctx, + client: proto.NewDevicePluginClient(c), + BasePluginClient: &base.BasePluginClient{ + Client: bproto.NewBasePluginClient(c), + DoneCtx: ctx, + }, + }, nil +} + +// Serve is used to serve a device plugin +func Serve(dev DevicePlugin, logger log.Logger) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: base.Handshake, + Plugins: map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: dev}, + base.PluginTypeDevice: &PluginDevice{Impl: dev}, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/proto/device.pb.go b/vendor/github.com/hashicorp/nomad/plugins/device/proto/device.pb.go new file mode 100644 index 0000000..744eff1 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/proto/device.pb.go @@ -0,0 +1,1054 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/device/proto/device.proto + +package proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import proto1 "github.com/hashicorp/nomad/plugins/shared/structs/proto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// FingerprintRequest is used to request for devices to be fingerprinted. 
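Finally, a plugin binary hands its implementation to device.Serve, which exposes both the base and device plugin interfaces over go-plugin's handshake. The sketch below assumes go-hclog's New and LoggerOptions for the logger and takes the concrete DevicePlugin implementation as a parameter rather than constructing one.

// Hypothetical sketch: launching a device plugin with the Serve helper above.
// The logger options are illustrative; the concrete implementation is passed
// in by the caller.
package hello

import (
	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/plugins/device"
)

func serveHelloDevice(impl device.DevicePlugin) {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:  "hello-device",
		Level: hclog.Trace,
	})

	// Serve blocks, exposing the base and device plugin interfaces over
	// go-plugin's gRPC transport.
	device.Serve(impl, logger)
}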
+type FingerprintRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FingerprintRequest) Reset() { *m = FingerprintRequest{} } +func (m *FingerprintRequest) String() string { return proto.CompactTextString(m) } +func (*FingerprintRequest) ProtoMessage() {} +func (*FingerprintRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{0} +} +func (m *FingerprintRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FingerprintRequest.Unmarshal(m, b) +} +func (m *FingerprintRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FingerprintRequest.Marshal(b, m, deterministic) +} +func (dst *FingerprintRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FingerprintRequest.Merge(dst, src) +} +func (m *FingerprintRequest) XXX_Size() int { + return xxx_messageInfo_FingerprintRequest.Size(m) +} +func (m *FingerprintRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FingerprintRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FingerprintRequest proto.InternalMessageInfo + +// FingerprintResponse returns a set of detected devices. +type FingerprintResponse struct { + // device_group is a group of devices that share a vendor, device_type, and + // device_name. This is returned as a set so that a single plugin could + // potentially detect several device types and models. + DeviceGroup []*DeviceGroup `protobuf:"bytes,1,rep,name=device_group,json=deviceGroup,proto3" json:"device_group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FingerprintResponse) Reset() { *m = FingerprintResponse{} } +func (m *FingerprintResponse) String() string { return proto.CompactTextString(m) } +func (*FingerprintResponse) ProtoMessage() {} +func (*FingerprintResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{1} +} +func (m *FingerprintResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FingerprintResponse.Unmarshal(m, b) +} +func (m *FingerprintResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FingerprintResponse.Marshal(b, m, deterministic) +} +func (dst *FingerprintResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FingerprintResponse.Merge(dst, src) +} +func (m *FingerprintResponse) XXX_Size() int { + return xxx_messageInfo_FingerprintResponse.Size(m) +} +func (m *FingerprintResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FingerprintResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FingerprintResponse proto.InternalMessageInfo + +func (m *FingerprintResponse) GetDeviceGroup() []*DeviceGroup { + if m != nil { + return m.DeviceGroup + } + return nil +} + +// DeviceGroup is a group of devices that share a vendor, device type and name. +type DeviceGroup struct { + // vendor is the name of the vendor of the device + Vendor string `protobuf:"bytes,1,opt,name=vendor,proto3" json:"vendor,omitempty"` + // device_type is the type of the device (gpu, fpga, etc). + DeviceType string `protobuf:"bytes,2,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"` + // device_name is the name of the device. + DeviceName string `protobuf:"bytes,3,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"` + // devices is the set of devices detected by the plugin. 
+ Devices []*DetectedDevice `protobuf:"bytes,4,rep,name=devices,proto3" json:"devices,omitempty"` + // attributes allows adding attributes to be used for constraints or + // affinities. + Attributes map[string]*proto1.Attribute `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceGroup) Reset() { *m = DeviceGroup{} } +func (m *DeviceGroup) String() string { return proto.CompactTextString(m) } +func (*DeviceGroup) ProtoMessage() {} +func (*DeviceGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{2} +} +func (m *DeviceGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceGroup.Unmarshal(m, b) +} +func (m *DeviceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceGroup.Marshal(b, m, deterministic) +} +func (dst *DeviceGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceGroup.Merge(dst, src) +} +func (m *DeviceGroup) XXX_Size() int { + return xxx_messageInfo_DeviceGroup.Size(m) +} +func (m *DeviceGroup) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceGroup proto.InternalMessageInfo + +func (m *DeviceGroup) GetVendor() string { + if m != nil { + return m.Vendor + } + return "" +} + +func (m *DeviceGroup) GetDeviceType() string { + if m != nil { + return m.DeviceType + } + return "" +} + +func (m *DeviceGroup) GetDeviceName() string { + if m != nil { + return m.DeviceName + } + return "" +} + +func (m *DeviceGroup) GetDevices() []*DetectedDevice { + if m != nil { + return m.Devices + } + return nil +} + +func (m *DeviceGroup) GetAttributes() map[string]*proto1.Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +// DetectedDevice is a single detected device. +type DetectedDevice struct { + // ID is the ID of the device. This ID is used during allocation and must be + // stable across restarts of the device driver. + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Health of the device. + Healthy bool `protobuf:"varint,2,opt,name=healthy,proto3" json:"healthy,omitempty"` + // health_description allows the device plugin to optionally + // annotate the health field with a human readable reason. + HealthDescription string `protobuf:"bytes,3,opt,name=health_description,json=healthDescription,proto3" json:"health_description,omitempty"` + // hw_locality is optionally set to expose hardware locality information for + // more optimal placement decisions. 
+ HwLocality *DeviceLocality `protobuf:"bytes,4,opt,name=hw_locality,json=hwLocality,proto3" json:"hw_locality,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DetectedDevice) Reset() { *m = DetectedDevice{} } +func (m *DetectedDevice) String() string { return proto.CompactTextString(m) } +func (*DetectedDevice) ProtoMessage() {} +func (*DetectedDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{3} +} +func (m *DetectedDevice) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DetectedDevice.Unmarshal(m, b) +} +func (m *DetectedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DetectedDevice.Marshal(b, m, deterministic) +} +func (dst *DetectedDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_DetectedDevice.Merge(dst, src) +} +func (m *DetectedDevice) XXX_Size() int { + return xxx_messageInfo_DetectedDevice.Size(m) +} +func (m *DetectedDevice) XXX_DiscardUnknown() { + xxx_messageInfo_DetectedDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_DetectedDevice proto.InternalMessageInfo + +func (m *DetectedDevice) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *DetectedDevice) GetHealthy() bool { + if m != nil { + return m.Healthy + } + return false +} + +func (m *DetectedDevice) GetHealthDescription() string { + if m != nil { + return m.HealthDescription + } + return "" +} + +func (m *DetectedDevice) GetHwLocality() *DeviceLocality { + if m != nil { + return m.HwLocality + } + return nil +} + +// DeviceLocality is used to expose HW locality information about a device. +type DeviceLocality struct { + // pci_bus_id is the PCI bus ID for the device. If reported, it + // allows Nomad to make NUMA aware optimizations. + PciBusId string `protobuf:"bytes,1,opt,name=pci_bus_id,json=pciBusId,proto3" json:"pci_bus_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceLocality) Reset() { *m = DeviceLocality{} } +func (m *DeviceLocality) String() string { return proto.CompactTextString(m) } +func (*DeviceLocality) ProtoMessage() {} +func (*DeviceLocality) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{4} +} +func (m *DeviceLocality) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceLocality.Unmarshal(m, b) +} +func (m *DeviceLocality) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceLocality.Marshal(b, m, deterministic) +} +func (dst *DeviceLocality) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceLocality.Merge(dst, src) +} +func (m *DeviceLocality) XXX_Size() int { + return xxx_messageInfo_DeviceLocality.Size(m) +} +func (m *DeviceLocality) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceLocality.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceLocality proto.InternalMessageInfo + +func (m *DeviceLocality) GetPciBusId() string { + if m != nil { + return m.PciBusId + } + return "" +} + +// ReserveRequest is used to ask the device driver for information on +// how to allocate the requested devices. +type ReserveRequest struct { + // device_ids are the requested devices. 
+ DeviceIds []string `protobuf:"bytes,1,rep,name=device_ids,json=deviceIds,proto3" json:"device_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReserveRequest) Reset() { *m = ReserveRequest{} } +func (m *ReserveRequest) String() string { return proto.CompactTextString(m) } +func (*ReserveRequest) ProtoMessage() {} +func (*ReserveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{5} +} +func (m *ReserveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReserveRequest.Unmarshal(m, b) +} +func (m *ReserveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReserveRequest.Marshal(b, m, deterministic) +} +func (dst *ReserveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReserveRequest.Merge(dst, src) +} +func (m *ReserveRequest) XXX_Size() int { + return xxx_messageInfo_ReserveRequest.Size(m) +} +func (m *ReserveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReserveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReserveRequest proto.InternalMessageInfo + +func (m *ReserveRequest) GetDeviceIds() []string { + if m != nil { + return m.DeviceIds + } + return nil +} + +// ReserveResponse informs Nomad how to expose the requested devices +// to the the task. +type ReserveResponse struct { + // container_res contains information on how to mount the device + // into a task isolated using container technologies (where the + // host is shared) + ContainerRes *ContainerReservation `protobuf:"bytes,1,opt,name=container_res,json=containerRes,proto3" json:"container_res,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReserveResponse) Reset() { *m = ReserveResponse{} } +func (m *ReserveResponse) String() string { return proto.CompactTextString(m) } +func (*ReserveResponse) ProtoMessage() {} +func (*ReserveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{6} +} +func (m *ReserveResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReserveResponse.Unmarshal(m, b) +} +func (m *ReserveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReserveResponse.Marshal(b, m, deterministic) +} +func (dst *ReserveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReserveResponse.Merge(dst, src) +} +func (m *ReserveResponse) XXX_Size() int { + return xxx_messageInfo_ReserveResponse.Size(m) +} +func (m *ReserveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReserveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReserveResponse proto.InternalMessageInfo + +func (m *ReserveResponse) GetContainerRes() *ContainerReservation { + if m != nil { + return m.ContainerRes + } + return nil +} + +// ContainerReservation returns how to mount the device into a +// container that shares the host OS. +type ContainerReservation struct { + // List of environment variable to be set + Envs map[string]string `protobuf:"bytes,1,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Mounts for the task. + Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + // Devices for the task. 
+ Devices []*DeviceSpec `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerReservation) Reset() { *m = ContainerReservation{} } +func (m *ContainerReservation) String() string { return proto.CompactTextString(m) } +func (*ContainerReservation) ProtoMessage() {} +func (*ContainerReservation) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{7} +} +func (m *ContainerReservation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ContainerReservation.Unmarshal(m, b) +} +func (m *ContainerReservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ContainerReservation.Marshal(b, m, deterministic) +} +func (dst *ContainerReservation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerReservation.Merge(dst, src) +} +func (m *ContainerReservation) XXX_Size() int { + return xxx_messageInfo_ContainerReservation.Size(m) +} +func (m *ContainerReservation) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerReservation.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerReservation proto.InternalMessageInfo + +func (m *ContainerReservation) GetEnvs() map[string]string { + if m != nil { + return m.Envs + } + return nil +} + +func (m *ContainerReservation) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *ContainerReservation) GetDevices() []*DeviceSpec { + if m != nil { + return m.Devices + } + return nil +} + +// Mount specifies a host volume to mount into a task. +// where device library or tools are installed on host and task +type Mount struct { + // Path of the mount within the task. + TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` + // Path of the mount on the host. + HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // If set, the mount is read-only. + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (m *Mount) String() string { return proto.CompactTextString(m) } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{8} +} +func (m *Mount) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Mount.Unmarshal(m, b) +} +func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Mount.Marshal(b, m, deterministic) +} +func (dst *Mount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mount.Merge(dst, src) +} +func (m *Mount) XXX_Size() int { + return xxx_messageInfo_Mount.Size(m) +} +func (m *Mount) XXX_DiscardUnknown() { + xxx_messageInfo_Mount.DiscardUnknown(m) +} + +var xxx_messageInfo_Mount proto.InternalMessageInfo + +func (m *Mount) GetTaskPath() string { + if m != nil { + return m.TaskPath + } + return "" +} + +func (m *Mount) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Mount) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +// DeviceSpec specifies a host device to mount into a task. +type DeviceSpec struct { + // Path of the device within the task. 
+ TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` + // Path of the device on the host. + HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // Cgroups permissions of the device, candidates are one or more of + // * r - allows task to read from the specified device. + // * w - allows task to write to the specified device. + // * m - allows task to create device files that do not yet exist + Permissions string `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceSpec) Reset() { *m = DeviceSpec{} } +func (m *DeviceSpec) String() string { return proto.CompactTextString(m) } +func (*DeviceSpec) ProtoMessage() {} +func (*DeviceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{9} +} +func (m *DeviceSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceSpec.Unmarshal(m, b) +} +func (m *DeviceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceSpec.Marshal(b, m, deterministic) +} +func (dst *DeviceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSpec.Merge(dst, src) +} +func (m *DeviceSpec) XXX_Size() int { + return xxx_messageInfo_DeviceSpec.Size(m) +} +func (m *DeviceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSpec proto.InternalMessageInfo + +func (m *DeviceSpec) GetTaskPath() string { + if m != nil { + return m.TaskPath + } + return "" +} + +func (m *DeviceSpec) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *DeviceSpec) GetPermissions() string { + if m != nil { + return m.Permissions + } + return "" +} + +// StatsRequest is used to parameterize the retrieval of statistics. +type StatsRequest struct { + // collection_interval is the duration in which to collect statistics. + CollectionInterval *duration.Duration `protobuf:"bytes,1,opt,name=collection_interval,json=collectionInterval,proto3" json:"collection_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{10} +} +func (m *StatsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatsRequest.Unmarshal(m, b) +} +func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic) +} +func (dst *StatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatsRequest.Merge(dst, src) +} +func (m *StatsRequest) XXX_Size() int { + return xxx_messageInfo_StatsRequest.Size(m) +} +func (m *StatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatsRequest proto.InternalMessageInfo + +func (m *StatsRequest) GetCollectionInterval() *duration.Duration { + if m != nil { + return m.CollectionInterval + } + return nil +} + +// StatsResponse returns the statistics for each device group. 
+type StatsResponse struct { + // groups contains statistics for each device group. + Groups []*DeviceGroupStats `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatsResponse) Reset() { *m = StatsResponse{} } +func (m *StatsResponse) String() string { return proto.CompactTextString(m) } +func (*StatsResponse) ProtoMessage() {} +func (*StatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{11} +} +func (m *StatsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatsResponse.Unmarshal(m, b) +} +func (m *StatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatsResponse.Marshal(b, m, deterministic) +} +func (dst *StatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatsResponse.Merge(dst, src) +} +func (m *StatsResponse) XXX_Size() int { + return xxx_messageInfo_StatsResponse.Size(m) +} +func (m *StatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatsResponse proto.InternalMessageInfo + +func (m *StatsResponse) GetGroups() []*DeviceGroupStats { + if m != nil { + return m.Groups + } + return nil +} + +// DeviceGroupStats contains statistics for each device of a particular +// device group, identified by the vendor, type and name of the device. +type DeviceGroupStats struct { + Vendor string `protobuf:"bytes,1,opt,name=vendor,proto3" json:"vendor,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // instance_stats is a mapping of each device ID to its statistics. 
+ InstanceStats map[string]*DeviceStats `protobuf:"bytes,4,rep,name=instance_stats,json=instanceStats,proto3" json:"instance_stats,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceGroupStats) Reset() { *m = DeviceGroupStats{} } +func (m *DeviceGroupStats) String() string { return proto.CompactTextString(m) } +func (*DeviceGroupStats) ProtoMessage() {} +func (*DeviceGroupStats) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{12} +} +func (m *DeviceGroupStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceGroupStats.Unmarshal(m, b) +} +func (m *DeviceGroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceGroupStats.Marshal(b, m, deterministic) +} +func (dst *DeviceGroupStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceGroupStats.Merge(dst, src) +} +func (m *DeviceGroupStats) XXX_Size() int { + return xxx_messageInfo_DeviceGroupStats.Size(m) +} +func (m *DeviceGroupStats) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceGroupStats.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceGroupStats proto.InternalMessageInfo + +func (m *DeviceGroupStats) GetVendor() string { + if m != nil { + return m.Vendor + } + return "" +} + +func (m *DeviceGroupStats) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *DeviceGroupStats) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeviceGroupStats) GetInstanceStats() map[string]*DeviceStats { + if m != nil { + return m.InstanceStats + } + return nil +} + +// DeviceStats is the statistics for an individual device +type DeviceStats struct { + // summary exposes a single summary metric that should be the most + // informative to users. + Summary *proto1.StatValue `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + // stats contains the verbose statistics for the device. + Stats *proto1.StatObject `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"` + // timestamp is the time the statistics were collected. 
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeviceStats) Reset() { *m = DeviceStats{} } +func (m *DeviceStats) String() string { return proto.CompactTextString(m) } +func (*DeviceStats) ProtoMessage() {} +func (*DeviceStats) Descriptor() ([]byte, []int) { + return fileDescriptor_device_a4d1cccedbd8401c, []int{13} +} +func (m *DeviceStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeviceStats.Unmarshal(m, b) +} +func (m *DeviceStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeviceStats.Marshal(b, m, deterministic) +} +func (dst *DeviceStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceStats.Merge(dst, src) +} +func (m *DeviceStats) XXX_Size() int { + return xxx_messageInfo_DeviceStats.Size(m) +} +func (m *DeviceStats) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceStats.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceStats proto.InternalMessageInfo + +func (m *DeviceStats) GetSummary() *proto1.StatValue { + if m != nil { + return m.Summary + } + return nil +} + +func (m *DeviceStats) GetStats() *proto1.StatObject { + if m != nil { + return m.Stats + } + return nil +} + +func (m *DeviceStats) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func init() { + proto.RegisterType((*FingerprintRequest)(nil), "hashicorp.nomad.plugins.device.FingerprintRequest") + proto.RegisterType((*FingerprintResponse)(nil), "hashicorp.nomad.plugins.device.FingerprintResponse") + proto.RegisterType((*DeviceGroup)(nil), "hashicorp.nomad.plugins.device.DeviceGroup") + proto.RegisterMapType((map[string]*proto1.Attribute)(nil), "hashicorp.nomad.plugins.device.DeviceGroup.AttributesEntry") + proto.RegisterType((*DetectedDevice)(nil), "hashicorp.nomad.plugins.device.DetectedDevice") + proto.RegisterType((*DeviceLocality)(nil), "hashicorp.nomad.plugins.device.DeviceLocality") + proto.RegisterType((*ReserveRequest)(nil), "hashicorp.nomad.plugins.device.ReserveRequest") + proto.RegisterType((*ReserveResponse)(nil), "hashicorp.nomad.plugins.device.ReserveResponse") + proto.RegisterType((*ContainerReservation)(nil), "hashicorp.nomad.plugins.device.ContainerReservation") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.device.ContainerReservation.EnvsEntry") + proto.RegisterType((*Mount)(nil), "hashicorp.nomad.plugins.device.Mount") + proto.RegisterType((*DeviceSpec)(nil), "hashicorp.nomad.plugins.device.DeviceSpec") + proto.RegisterType((*StatsRequest)(nil), "hashicorp.nomad.plugins.device.StatsRequest") + proto.RegisterType((*StatsResponse)(nil), "hashicorp.nomad.plugins.device.StatsResponse") + proto.RegisterType((*DeviceGroupStats)(nil), "hashicorp.nomad.plugins.device.DeviceGroupStats") + proto.RegisterMapType((map[string]*DeviceStats)(nil), "hashicorp.nomad.plugins.device.DeviceGroupStats.InstanceStatsEntry") + proto.RegisterType((*DeviceStats)(nil), "hashicorp.nomad.plugins.device.DeviceStats") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DevicePluginClient is the client API for DevicePlugin service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DevicePluginClient interface { + // Fingerprint allows the device plugin to return a set of + // detected devices and provide a mechanism to update the state of + // the device. + Fingerprint(ctx context.Context, in *FingerprintRequest, opts ...grpc.CallOption) (DevicePlugin_FingerprintClient, error) + // Reserve is called by the client before starting an allocation + // that requires access to the plugin’s devices. The plugin can use + // this to run any setup steps and provides the mounting details to + // the Nomad client + Reserve(ctx context.Context, in *ReserveRequest, opts ...grpc.CallOption) (*ReserveResponse, error) + // Stats returns a stream of device statistics. + Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (DevicePlugin_StatsClient, error) +} + +type devicePluginClient struct { + cc *grpc.ClientConn +} + +func NewDevicePluginClient(cc *grpc.ClientConn) DevicePluginClient { + return &devicePluginClient{cc} +} + +func (c *devicePluginClient) Fingerprint(ctx context.Context, in *FingerprintRequest, opts ...grpc.CallOption) (DevicePlugin_FingerprintClient, error) { + stream, err := c.cc.NewStream(ctx, &_DevicePlugin_serviceDesc.Streams[0], "/hashicorp.nomad.plugins.device.DevicePlugin/Fingerprint", opts...) + if err != nil { + return nil, err + } + x := &devicePluginFingerprintClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DevicePlugin_FingerprintClient interface { + Recv() (*FingerprintResponse, error) + grpc.ClientStream +} + +type devicePluginFingerprintClient struct { + grpc.ClientStream +} + +func (x *devicePluginFingerprintClient) Recv() (*FingerprintResponse, error) { + m := new(FingerprintResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *devicePluginClient) Reserve(ctx context.Context, in *ReserveRequest, opts ...grpc.CallOption) (*ReserveResponse, error) { + out := new(ReserveResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.device.DevicePlugin/Reserve", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *devicePluginClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (DevicePlugin_StatsClient, error) { + stream, err := c.cc.NewStream(ctx, &_DevicePlugin_serviceDesc.Streams[1], "/hashicorp.nomad.plugins.device.DevicePlugin/Stats", opts...) + if err != nil { + return nil, err + } + x := &devicePluginStatsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DevicePlugin_StatsClient interface { + Recv() (*StatsResponse, error) + grpc.ClientStream +} + +type devicePluginStatsClient struct { + grpc.ClientStream +} + +func (x *devicePluginStatsClient) Recv() (*StatsResponse, error) { + m := new(StatsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// DevicePluginServer is the server API for DevicePlugin service. +type DevicePluginServer interface { + // Fingerprint allows the device plugin to return a set of + // detected devices and provide a mechanism to update the state of + // the device. 
+ Fingerprint(*FingerprintRequest, DevicePlugin_FingerprintServer) error + // Reserve is called by the client before starting an allocation + // that requires access to the plugin’s devices. The plugin can use + // this to run any setup steps and provides the mounting details to + // the Nomad client + Reserve(context.Context, *ReserveRequest) (*ReserveResponse, error) + // Stats returns a stream of device statistics. + Stats(*StatsRequest, DevicePlugin_StatsServer) error +} + +func RegisterDevicePluginServer(s *grpc.Server, srv DevicePluginServer) { + s.RegisterService(&_DevicePlugin_serviceDesc, srv) +} + +func _DevicePlugin_Fingerprint_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(FingerprintRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DevicePluginServer).Fingerprint(m, &devicePluginFingerprintServer{stream}) +} + +type DevicePlugin_FingerprintServer interface { + Send(*FingerprintResponse) error + grpc.ServerStream +} + +type devicePluginFingerprintServer struct { + grpc.ServerStream +} + +func (x *devicePluginFingerprintServer) Send(m *FingerprintResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _DevicePlugin_Reserve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReserveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DevicePluginServer).Reserve(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.device.DevicePlugin/Reserve", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DevicePluginServer).Reserve(ctx, req.(*ReserveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DevicePlugin_Stats_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StatsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DevicePluginServer).Stats(m, &devicePluginStatsServer{stream}) +} + +type DevicePlugin_StatsServer interface { + Send(*StatsResponse) error + grpc.ServerStream +} + +type devicePluginStatsServer struct { + grpc.ServerStream +} + +func (x *devicePluginStatsServer) Send(m *StatsResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _DevicePlugin_serviceDesc = grpc.ServiceDesc{ + ServiceName: "hashicorp.nomad.plugins.device.DevicePlugin", + HandlerType: (*DevicePluginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Reserve", + Handler: _DevicePlugin_Reserve_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Fingerprint", + Handler: _DevicePlugin_Fingerprint_Handler, + ServerStreams: true, + }, + { + StreamName: "Stats", + Handler: _DevicePlugin_Stats_Handler, + ServerStreams: true, + }, + }, + Metadata: "plugins/device/proto/device.proto", +} + +func init() { + proto.RegisterFile("plugins/device/proto/device.proto", fileDescriptor_device_a4d1cccedbd8401c) +} + +var fileDescriptor_device_a4d1cccedbd8401c = []byte{ + // 979 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x8e, 0x1b, 0x35, + 0x10, 0x27, 0xb9, 0xcb, 0x25, 0x99, 0xdc, 0x5d, 0x8b, 0x7b, 0x42, 0x61, 0x81, 0xf6, 0x58, 0x09, + 0xa9, 0x02, 0xba, 0x29, 0x29, 0x12, 0x15, 0x08, 0xa4, 0xf6, 0x52, 0xee, 0xc2, 0x9f, 0x5e, 0xe5, + 0x56, 0x48, 0x2d, 0x12, 0x2b, 0x67, 0xd7, 0x64, 0xdd, 0xee, 0xda, 0x8b, 0xed, 0x4d, 0x15, 0x3e, + 0xf1, 0x38, 
0x7c, 0xe1, 0x05, 0x78, 0x18, 0x3e, 0xf0, 0x24, 0x68, 0x6d, 0x6f, 0xb2, 0xf7, 0xa7, + 0x24, 0x69, 0x3f, 0xad, 0x3d, 0x33, 0xbf, 0x99, 0xf1, 0xcc, 0xcf, 0xe3, 0x85, 0x0f, 0xf3, 0xb4, + 0x98, 0x32, 0xae, 0x06, 0x31, 0x9d, 0xb1, 0x88, 0x0e, 0x72, 0x29, 0xb4, 0x70, 0x9b, 0xc0, 0x6c, + 0xd0, 0xf5, 0x84, 0xa8, 0x84, 0x45, 0x42, 0xe6, 0x01, 0x17, 0x19, 0x89, 0x03, 0x07, 0x09, 0xac, + 0x95, 0x77, 0x63, 0x2a, 0xc4, 0x34, 0x75, 0xd0, 0x49, 0xf1, 0xeb, 0x40, 0xb3, 0x8c, 0x2a, 0x4d, + 0xb2, 0xdc, 0x3a, 0xf0, 0xae, 0x9f, 0x37, 0x88, 0x0b, 0x49, 0x34, 0x13, 0xdc, 0xe9, 0x8f, 0xa7, + 0x4c, 0x27, 0xc5, 0x24, 0x88, 0x44, 0x36, 0x58, 0xc4, 0x1a, 0x98, 0x58, 0x83, 0x2a, 0x3d, 0x95, + 0x10, 0x49, 0xe3, 0x81, 0xd2, 0xb2, 0x88, 0xb4, 0x72, 0x69, 0x12, 0xad, 0x25, 0x9b, 0x14, 0xda, + 0x65, 0xea, 0x1d, 0xbd, 0xae, 0x23, 0xa5, 0x89, 0x56, 0xd6, 0x89, 0x7f, 0x00, 0xe8, 0x5b, 0xc6, + 0xa7, 0x54, 0xe6, 0x92, 0x71, 0x8d, 0xe9, 0x6f, 0x05, 0x55, 0xda, 0xa7, 0x70, 0xed, 0x8c, 0x54, + 0xe5, 0x82, 0x2b, 0x8a, 0x1e, 0xc2, 0xae, 0xad, 0x42, 0x38, 0x95, 0xa2, 0xc8, 0xfb, 0x8d, 0xc3, + 0xad, 0x9b, 0xbd, 0xe1, 0x27, 0xc1, 0xff, 0x97, 0x2c, 0x18, 0x99, 0xcf, 0x71, 0x09, 0xc1, 0xbd, + 0x78, 0xb9, 0xf1, 0xff, 0xd8, 0x82, 0x5e, 0x4d, 0x89, 0xde, 0x81, 0x9d, 0x19, 0xe5, 0xb1, 0x90, + 0xfd, 0xc6, 0x61, 0xe3, 0x66, 0x17, 0xbb, 0x1d, 0xba, 0x01, 0x0e, 0x16, 0xea, 0x79, 0x4e, 0xfb, + 0x4d, 0xa3, 0x04, 0x2b, 0x7a, 0x32, 0xcf, 0x69, 0xcd, 0x80, 0x93, 0x8c, 0xf6, 0xb7, 0xea, 0x06, + 0x0f, 0x49, 0x46, 0xd1, 0x09, 0xb4, 0xed, 0x4e, 0xf5, 0xb7, 0x4d, 0xd2, 0xc1, 0xea, 0xa4, 0x35, + 0x8d, 0x34, 0x8d, 0x6d, 0x7e, 0xb8, 0x82, 0xa3, 0x9f, 0x01, 0x16, 0x8d, 0x50, 0xfd, 0x96, 0x71, + 0xf6, 0xd5, 0x06, 0x15, 0x08, 0xee, 0x2d, 0xd0, 0x0f, 0xb8, 0x96, 0x73, 0x5c, 0x73, 0xe7, 0xe5, + 0x70, 0xe5, 0x9c, 0x1a, 0x5d, 0x85, 0xad, 0x17, 0x74, 0xee, 0x0a, 0x52, 0x2e, 0xd1, 0x31, 0xb4, + 0x66, 0x24, 0x2d, 0x6c, 0x1d, 0x7a, 0xc3, 0xcf, 0x5e, 0x19, 0xdc, 0x36, 0x3f, 0x70, 0xcd, 0x5f, + 0x06, 0xc6, 0x16, 0xff, 0x65, 0xf3, 0x6e, 0xc3, 0xff, 0xbb, 0x01, 0xfb, 0x67, 0x8f, 0x8a, 0xf6, + 0xa1, 0x39, 0x1e, 0xb9, 0x80, 0xcd, 0xf1, 0x08, 0xf5, 0xa1, 0x9d, 0x50, 0x92, 0xea, 0x64, 0x6e, + 0x22, 0x76, 0x70, 0xb5, 0x45, 0xb7, 0x00, 0xd9, 0x65, 0x18, 0x53, 0x15, 0x49, 0x96, 0x97, 0x34, + 0x77, 0xd5, 0x7f, 0xdb, 0x6a, 0x46, 0x4b, 0x05, 0x3a, 0x85, 0x5e, 0xf2, 0x32, 0x4c, 0x45, 0x44, + 0x52, 0xa6, 0xe7, 0xfd, 0x6d, 0x93, 0x7e, 0xb0, 0x5e, 0xed, 0x7e, 0x70, 0x28, 0x0c, 0xc9, 0xcb, + 0x6a, 0xed, 0x07, 0x65, 0xee, 0x75, 0x2d, 0x7a, 0x1f, 0x20, 0x8f, 0x58, 0x38, 0x29, 0x54, 0xc8, + 0x62, 0x77, 0x86, 0x4e, 0x1e, 0xb1, 0xfb, 0x85, 0x1a, 0xc7, 0xfe, 0x00, 0xf6, 0x31, 0x55, 0x54, + 0xce, 0xa8, 0x23, 0x3a, 0xfa, 0x00, 0x1c, 0x4b, 0x42, 0x16, 0x2b, 0xc3, 0xe7, 0x2e, 0xee, 0x5a, + 0xc9, 0x38, 0x56, 0x7e, 0x0a, 0x57, 0x16, 0x00, 0x77, 0x07, 0x9e, 0xc2, 0x5e, 0x24, 0xb8, 0x26, + 0x8c, 0x53, 0x19, 0x4a, 0xaa, 0x4c, 0x90, 0xde, 0xf0, 0xf3, 0x55, 0xc7, 0x38, 0xaa, 0x40, 0xd6, + 0xa1, 0x99, 0x08, 0x78, 0x37, 0xaa, 0x49, 0xfd, 0x3f, 0x9b, 0x70, 0x70, 0x99, 0x19, 0xc2, 0xb0, + 0x4d, 0xf9, 0x4c, 0xb9, 0xfb, 0xf6, 0xcd, 0xeb, 0x84, 0x0a, 0x1e, 0xf0, 0x99, 0x23, 0x9c, 0xf1, + 0x85, 0xbe, 0x86, 0x9d, 0x4c, 0x14, 0x5c, 0xab, 0x7e, 0xd3, 0x78, 0xfd, 0x68, 0x95, 0xd7, 0x1f, + 0x4b, 0x6b, 0xec, 0x40, 0x68, 0xb4, 0xbc, 0x50, 0x5b, 0x06, 0xff, 0xf1, 0x7a, 0x7d, 0x7c, 0x9c, + 0xd3, 0x68, 0x71, 0x99, 0xbc, 0x2f, 0xa0, 0xbb, 0xc8, 0xeb, 0x12, 0xa6, 0x1f, 0xd4, 0x99, 0xde, + 0xad, 0xd3, 0xf6, 0x17, 0x68, 0x99, 0x7c, 0xd0, 0x7b, 0xd0, 0xd5, 0x44, 0xbd, 0x08, 0x73, 0xa2, + 0x93, 0xaa, 0xdf, 0xa5, 0xe0, 0x11, 
0xd1, 0x49, 0xa9, 0x4c, 0x84, 0xd2, 0x56, 0x69, 0x7d, 0x74, + 0x4a, 0x41, 0xa5, 0x94, 0x94, 0xc4, 0xa1, 0xe0, 0xe9, 0xdc, 0x70, 0xb6, 0x83, 0x3b, 0xa5, 0xe0, + 0x94, 0xa7, 0x73, 0x3f, 0x01, 0x58, 0xe6, 0xfb, 0x06, 0x41, 0x0e, 0xa1, 0x97, 0x53, 0x99, 0x31, + 0xa5, 0x98, 0xe0, 0xca, 0x5d, 0x8d, 0xba, 0xc8, 0x7f, 0x06, 0xbb, 0x8f, 0xcb, 0x79, 0x5c, 0x31, + 0xf2, 0x3b, 0xb8, 0x16, 0x89, 0x34, 0xa5, 0x51, 0xd9, 0xb5, 0x90, 0x71, 0x5d, 0x76, 0x30, 0x75, + 0x2c, 0x7b, 0x37, 0xb0, 0x8f, 0x4b, 0x50, 0x3d, 0x2e, 0xc1, 0xc8, 0x3d, 0x2e, 0x18, 0x2d, 0x51, + 0x63, 0x07, 0xf2, 0x9f, 0xc2, 0x9e, 0xf3, 0xed, 0xc8, 0x7b, 0x02, 0x3b, 0x66, 0x72, 0x57, 0x54, + 0xba, 0xbd, 0xc1, 0xe0, 0xb2, 0x9e, 0x1c, 0xde, 0xff, 0xab, 0x09, 0x57, 0xcf, 0x2b, 0x5f, 0x39, + 0xbf, 0x11, 0x6c, 0xd7, 0x06, 0xb7, 0x59, 0x97, 0xb2, 0xda, 0xac, 0x36, 0x6b, 0xf4, 0x1c, 0xf6, + 0x19, 0x57, 0x9a, 0xf0, 0x88, 0x86, 0xe6, 0x91, 0x72, 0xc3, 0xfa, 0x68, 0xd3, 0x34, 0x83, 0xb1, + 0x73, 0x63, 0x76, 0x96, 0xf6, 0x7b, 0xac, 0x2e, 0xf3, 0x32, 0x40, 0x17, 0x8d, 0x2e, 0xe1, 0xe0, + 0xbd, 0xb3, 0xd3, 0x76, 0xcd, 0xc7, 0xce, 0x16, 0xab, 0x46, 0xd8, 0x7f, 0x1a, 0xd5, 0x53, 0x67, + 0x4b, 0xf5, 0x3d, 0xb4, 0x55, 0x91, 0x65, 0x44, 0xce, 0x5d, 0x6b, 0xd7, 0x1e, 0xe3, 0x25, 0xfe, + 0xa7, 0xd2, 0x2f, 0xae, 0x3c, 0xa0, 0x13, 0x68, 0xd9, 0x72, 0xd9, 0x1c, 0x87, 0x9b, 0xb8, 0x3a, + 0x9d, 0x3c, 0xa7, 0x91, 0xc6, 0xd6, 0x01, 0xba, 0x0b, 0xdd, 0xc5, 0xff, 0x8c, 0x69, 0x4d, 0x6f, + 0xe8, 0x5d, 0xe0, 0xdc, 0x93, 0xca, 0x02, 0x2f, 0x8d, 0x87, 0xff, 0x36, 0x61, 0xd7, 0x1e, 0xf0, + 0x91, 0x09, 0x86, 0x7e, 0x87, 0x5e, 0xed, 0x1f, 0x02, 0x0d, 0x57, 0x15, 0xee, 0xe2, 0x6f, 0x88, + 0x77, 0x67, 0x23, 0x8c, 0xe5, 0xb8, 0xff, 0xd6, 0xed, 0x06, 0x4a, 0xa1, 0xed, 0xe6, 0x36, 0x5a, + 0xf9, 0xbe, 0x9c, 0x7d, 0x11, 0xbc, 0xc1, 0xda, 0xf6, 0x55, 0x3c, 0x94, 0x40, 0xcb, 0x36, 0xf5, + 0xd3, 0x55, 0xd8, 0xfa, 0x4d, 0xf7, 0x6e, 0xad, 0x69, 0xbd, 0x3c, 0xd7, 0xfd, 0xf6, 0xb3, 0x96, + 0xed, 0xc2, 0x8e, 0xf9, 0xdc, 0xf9, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x12, 0xb9, 0x76, 0xd8, 0xd1, + 0x0a, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/proto/device.proto b/vendor/github.com/hashicorp/nomad/plugins/device/proto/device.proto new file mode 100644 index 0000000..f551953 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/proto/device.proto @@ -0,0 +1,175 @@ +syntax = "proto3"; +package hashicorp.nomad.plugins.device; +option go_package = "proto"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; +import "github.com/hashicorp/nomad/plugins/shared/structs/proto/attribute.proto"; +import "github.com/hashicorp/nomad/plugins/shared/structs/proto/stats.proto"; + +// DevicePlugin is the API exposed by device plugins +service DevicePlugin { + // Fingerprint allows the device plugin to return a set of + // detected devices and provide a mechanism to update the state of + // the device. + rpc Fingerprint(FingerprintRequest) returns (stream FingerprintResponse) {} + + // Reserve is called by the client before starting an allocation + // that requires access to the plugin’s devices. The plugin can use + // this to run any setup steps and provides the mounting details to + // the Nomad client + rpc Reserve(ReserveRequest) returns (ReserveResponse) {} + + // Stats returns a stream of device statistics. + rpc Stats(StatsRequest) returns (stream StatsResponse) {} +} + +// FingerprintRequest is used to request for devices to be fingerprinted. 
+message FingerprintRequest {} + +// FingerprintResponse returns a set of detected devices. +message FingerprintResponse { + // device_group is a group of devices that share a vendor, device_type, and + // device_name. This is returned as a set so that a single plugin could + // potentially detect several device types and models. + repeated DeviceGroup device_group = 1; +} + +// DeviceGroup is a group of devices that share a vendor, device type and name. +message DeviceGroup { + // vendor is the name of the vendor of the device + string vendor = 1; + + // device_type is the type of the device (gpu, fpga, etc). + string device_type = 2; + + // device_name is the name of the device. + string device_name = 3; + + // devices is the set of devices detected by the plugin. + repeated DetectedDevice devices = 4; + + // attributes allows adding attributes to be used for constraints or + // affinities. + map attributes = 5; +} + +// DetectedDevice is a single detected device. +message DetectedDevice { + // ID is the ID of the device. This ID is used during allocation and must be + // stable across restarts of the device driver. + string ID = 1; + + // Health of the device. + bool healthy = 2; + + // health_description allows the device plugin to optionally + // annotate the health field with a human readable reason. + string health_description = 3; + + // hw_locality is optionally set to expose hardware locality information for + // more optimal placement decisions. + DeviceLocality hw_locality = 4; +} + +// DeviceLocality is used to expose HW locality information about a device. +message DeviceLocality { + // pci_bus_id is the PCI bus ID for the device. If reported, it + // allows Nomad to make NUMA aware optimizations. + string pci_bus_id = 1; +} + + +// ReserveRequest is used to ask the device driver for information on +// how to allocate the requested devices. +message ReserveRequest { + // device_ids are the requested devices. + repeated string device_ids = 1; +} + +// ReserveResponse informs Nomad how to expose the requested devices +// to the the task. +message ReserveResponse { + // container_res contains information on how to mount the device + // into a task isolated using container technologies (where the + // host is shared) + ContainerReservation container_res = 1; +} + +// ContainerReservation returns how to mount the device into a +// container that shares the host OS. +message ContainerReservation { + // List of environment variable to be set + map envs = 1; + + // Mounts for the task. + repeated Mount mounts = 2; + + // Devices for the task. + repeated DeviceSpec devices = 3; +} + +// Mount specifies a host volume to mount into a task. +// where device library or tools are installed on host and task +message Mount { + // Path of the mount within the task. + string task_path = 1; + + // Path of the mount on the host. + string host_path = 2; + + // If set, the mount is read-only. + bool read_only = 3; +} + +// DeviceSpec specifies a host device to mount into a task. +message DeviceSpec { + // Path of the device within the task. + string task_path = 1; + + // Path of the device on the host. + string host_path = 2; + + // Cgroups permissions of the device, candidates are one or more of + // * r - allows task to read from the specified device. + // * w - allows task to write to the specified device. + // * m - allows task to create device files that do not yet exist + string permissions = 3; +} + + +// StatsRequest is used to parameterize the retrieval of statistics. 
+message StatsRequest { + // collection_interval is the duration in which to collect statistics. + google.protobuf.Duration collection_interval = 1; +} + +// StatsResponse returns the statistics for each device group. +message StatsResponse { + // groups contains statistics for each device group. + repeated DeviceGroupStats groups = 1; +} + +// DeviceGroupStats contains statistics for each device of a particular +// device group, identified by the vendor, type and name of the device. +message DeviceGroupStats { + string vendor = 1; + string type = 2; + string name = 3; + + // instance_stats is a mapping of each device ID to its statistics. + map instance_stats = 4; +} + +// DeviceStats is the statistics for an individual device +message DeviceStats { + // summary exposes a single summary metric that should be the most + // informative to users. + hashicorp.nomad.plugins.shared.structs.StatValue summary = 1; + + // stats contains the verbose statistics for the device. + hashicorp.nomad.plugins.shared.structs.StatObject stats = 2; + + // timestamp is the time the statistics were collected. + google.protobuf.Timestamp timestamp = 3; +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/server.go b/vendor/github.com/hashicorp/nomad/plugins/device/server.go new file mode 100644 index 0000000..ead4922 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/server.go @@ -0,0 +1,119 @@ +package device + +import ( + "fmt" + "time" + + "github.com/golang/protobuf/ptypes" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugins/device/proto" + context "golang.org/x/net/context" +) + +// devicePluginServer wraps a device plugin and exposes it via gRPC. +type devicePluginServer struct { + broker *plugin.GRPCBroker + impl DevicePlugin +} + +func (d *devicePluginServer) Fingerprint(req *proto.FingerprintRequest, stream proto.DevicePlugin_FingerprintServer) error { + ctx := stream.Context() + outCh, err := d.impl.Fingerprint(ctx) + if err != nil { + return err + } + + for { + select { + case <-ctx.Done(): + return nil + case resp, ok := <-outCh: + // The output channel has been closed, end the stream + if !ok { + return nil + } + + // Handle any error + if resp.Error != nil { + return resp.Error + } + + // Convert the devices + out := convertStructDeviceGroups(resp.Devices) + + // Build the response + presp := &proto.FingerprintResponse{ + DeviceGroup: out, + } + + // Send the devices + if err := stream.Send(presp); err != nil { + return err + } + } + } +} + +func (d *devicePluginServer) Reserve(ctx context.Context, req *proto.ReserveRequest) (*proto.ReserveResponse, error) { + resp, err := d.impl.Reserve(req.GetDeviceIds()) + if err != nil { + return nil, err + } + + // Make the response + presp := &proto.ReserveResponse{ + ContainerRes: convertStructContainerReservation(resp), + } + + return presp, nil +} + +func (d *devicePluginServer) Stats(req *proto.StatsRequest, stream proto.DevicePlugin_StatsServer) error { + ctx := stream.Context() + + // Retrieve the collection interval + interval, err := ptypes.Duration(req.CollectionInterval) + if err != nil { + return fmt.Errorf("failed to parse collection interval: %v", err) + } + + // Default the duration if we get an invalid duration + if interval.Nanoseconds() == 0 { + interval = time.Second + } + + outCh, err := d.impl.Stats(ctx, interval) + if err != nil { + return err + } + + for { + select { + case <-ctx.Done(): + return nil + case resp, ok := <-outCh: + // The output channel has been closed, end the 
stream + if !ok { + return nil + } + + // Handle any error + if resp.Error != nil { + return resp.Error + } + + // Convert the devices + out := convertStructDeviceGroupsStats(resp.Groups) + + // Build the response + presp := &proto.StatsResponse{ + Groups: out, + } + + // Send the devices + if err := stream.Send(presp); err != nil { + return err + } + } + } +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/util.go b/vendor/github.com/hashicorp/nomad/plugins/device/util.go new file mode 100644 index 0000000..6eb226e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/util.go @@ -0,0 +1,387 @@ +package device + +import ( + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/nomad/plugins/device/proto" + "github.com/hashicorp/nomad/plugins/shared/structs" +) + +// convertProtoDeviceGroups converts between a list of proto and structs DeviceGroup +func convertProtoDeviceGroups(in []*proto.DeviceGroup) []*DeviceGroup { + if in == nil { + return nil + } + + out := make([]*DeviceGroup, len(in)) + for i, group := range in { + out[i] = convertProtoDeviceGroup(group) + } + + return out +} + +// convertProtoDeviceGroup converts between a proto and structs DeviceGroup +func convertProtoDeviceGroup(in *proto.DeviceGroup) *DeviceGroup { + if in == nil { + return nil + } + + return &DeviceGroup{ + Vendor: in.Vendor, + Type: in.DeviceType, + Name: in.DeviceName, + Devices: convertProtoDevices(in.Devices), + Attributes: structs.ConvertProtoAttributeMap(in.Attributes), + } +} + +// convertProtoDevices converts between a list of proto and structs Device +func convertProtoDevices(in []*proto.DetectedDevice) []*Device { + if in == nil { + return nil + } + + out := make([]*Device, len(in)) + for i, d := range in { + out[i] = convertProtoDevice(d) + } + + return out +} + +// convertProtoDevice converts between a proto and structs Device +func convertProtoDevice(in *proto.DetectedDevice) *Device { + if in == nil { + return nil + } + + return &Device{ + ID: in.ID, + Healthy: in.Healthy, + HealthDesc: in.HealthDescription, + HwLocality: convertProtoDeviceLocality(in.HwLocality), + } +} + +// convertProtoDeviceLocality converts between a proto and structs DeviceLocality +func convertProtoDeviceLocality(in *proto.DeviceLocality) *DeviceLocality { + if in == nil { + return nil + } + + return &DeviceLocality{ + PciBusID: in.PciBusId, + } +} + +// convertProtoContainerReservation is used to convert between a proto and struct +// ContainerReservation +func convertProtoContainerReservation(in *proto.ContainerReservation) *ContainerReservation { + if in == nil { + return nil + } + + return &ContainerReservation{ + Envs: in.Envs, + Mounts: convertProtoMounts(in.Mounts), + Devices: convertProtoDeviceSpecs(in.Devices), + } +} + +// convertProtoMount converts between a list of proto and structs Mount +func convertProtoMounts(in []*proto.Mount) []*Mount { + if in == nil { + return nil + } + + out := make([]*Mount, len(in)) + for i, d := range in { + out[i] = convertProtoMount(d) + } + + return out +} + +// convertProtoMount converts between a proto and structs Mount +func convertProtoMount(in *proto.Mount) *Mount { + if in == nil { + return nil + } + + return &Mount{ + TaskPath: in.TaskPath, + HostPath: in.HostPath, + ReadOnly: in.ReadOnly, + } +} + +// convertProtoDeviceSpecs converts between a list of proto and structs DeviceSpecs +func convertProtoDeviceSpecs(in []*proto.DeviceSpec) []*DeviceSpec { + if in == nil { + return nil + } + + out := make([]*DeviceSpec, len(in)) + for i, d := 
range in { + out[i] = convertProtoDeviceSpec(d) + } + + return out +} + +// convertProtoDeviceSpec converts between a proto and structs DeviceSpec +func convertProtoDeviceSpec(in *proto.DeviceSpec) *DeviceSpec { + if in == nil { + return nil + } + + return &DeviceSpec{ + TaskPath: in.TaskPath, + HostPath: in.HostPath, + CgroupPerms: in.Permissions, + } +} + +// convertStructDeviceGroup converts between a list of struct and proto DeviceGroup +func convertStructDeviceGroups(in []*DeviceGroup) []*proto.DeviceGroup { + if in == nil { + return nil + } + + out := make([]*proto.DeviceGroup, len(in)) + for i, g := range in { + out[i] = convertStructDeviceGroup(g) + } + + return out +} + +// convertStructDeviceGroup converts between a struct and proto DeviceGroup +func convertStructDeviceGroup(in *DeviceGroup) *proto.DeviceGroup { + if in == nil { + return nil + } + + return &proto.DeviceGroup{ + Vendor: in.Vendor, + DeviceType: in.Type, + DeviceName: in.Name, + Devices: convertStructDevices(in.Devices), + Attributes: structs.ConvertStructAttributeMap(in.Attributes), + } +} + +// convertStructDevices converts between a list of struct and proto Device +func convertStructDevices(in []*Device) []*proto.DetectedDevice { + if in == nil { + return nil + } + + out := make([]*proto.DetectedDevice, len(in)) + for i, d := range in { + out[i] = convertStructDevice(d) + } + + return out +} + +// convertStructDevice converts between a struct and proto Device +func convertStructDevice(in *Device) *proto.DetectedDevice { + if in == nil { + return nil + } + + return &proto.DetectedDevice{ + ID: in.ID, + Healthy: in.Healthy, + HealthDescription: in.HealthDesc, + HwLocality: convertStructDeviceLocality(in.HwLocality), + } +} + +// convertStructDeviceLocality converts between a struct and proto DeviceLocality +func convertStructDeviceLocality(in *DeviceLocality) *proto.DeviceLocality { + if in == nil { + return nil + } + + return &proto.DeviceLocality{ + PciBusId: in.PciBusID, + } +} + +// convertStructContainerReservation is used to convert between a struct and +// proto ContainerReservation +func convertStructContainerReservation(in *ContainerReservation) *proto.ContainerReservation { + if in == nil { + return nil + } + + return &proto.ContainerReservation{ + Envs: in.Envs, + Mounts: convertStructMounts(in.Mounts), + Devices: convertStructDeviceSpecs(in.Devices), + } +} + +// convertStructMount converts between a list of structs and proto Mount +func convertStructMounts(in []*Mount) []*proto.Mount { + if in == nil { + return nil + } + + out := make([]*proto.Mount, len(in)) + for i, m := range in { + out[i] = convertStructMount(m) + } + + return out +} + +// convertStructMount converts between a struct and proto Mount +func convertStructMount(in *Mount) *proto.Mount { + if in == nil { + return nil + } + + return &proto.Mount{ + TaskPath: in.TaskPath, + HostPath: in.HostPath, + ReadOnly: in.ReadOnly, + } +} + +// convertStructDeviceSpecs converts between a list of struct and proto DeviceSpecs +func convertStructDeviceSpecs(in []*DeviceSpec) []*proto.DeviceSpec { + if in == nil { + return nil + } + + out := make([]*proto.DeviceSpec, len(in)) + for i, d := range in { + out[i] = convertStructDeviceSpec(d) + } + + return out +} + +// convertStructDeviceSpec converts between a struct and proto DeviceSpec +func convertStructDeviceSpec(in *DeviceSpec) *proto.DeviceSpec { + if in == nil { + return nil + } + + return &proto.DeviceSpec{ + TaskPath: in.TaskPath, + HostPath: in.HostPath, + Permissions: in.CgroupPerms, + } +} + 
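The converters in util.go all follow one pattern: nil in, nil out, otherwise a field-for-field copy between the plugin's Go structs and their generated proto counterparts, so the two directions compose into a lossless round trip. The sketch below is illustrative only and is not part of the vendored upstream file; it assumes the surrounding package's DeviceSpec type and the two DeviceSpec converters shown above, and the device paths and cgroup permissions are placeholder values.

// exampleDeviceSpecRoundTrip is an illustrative sketch, not upstream code: it
// shows that a DeviceSpec survives a trip through its proto wire form.
func exampleDeviceSpecRoundTrip() *DeviceSpec {
	// Hypothetical values; nothing in Nomad uses these defaults.
	spec := &DeviceSpec{
		TaskPath:    "/dev/example0",
		HostPath:    "/dev/example0",
		CgroupPerms: "rwm",
	}

	// Convert to the proto form and straight back again; a nil input would
	// pass through both converters safely as nil.
	return convertProtoDeviceSpec(convertStructDeviceSpec(spec))
}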
+// convertProtoDeviceGroupsStats converts between a list of struct and proto +// DeviceGroupStats +func convertProtoDeviceGroupsStats(in []*proto.DeviceGroupStats) []*DeviceGroupStats { + if in == nil { + return nil + } + + out := make([]*DeviceGroupStats, len(in)) + for i, m := range in { + out[i] = convertProtoDeviceGroupStats(m) + } + + return out +} + +// convertProtoDeviceGroupStats converts between a proto and struct +// DeviceGroupStats +func convertProtoDeviceGroupStats(in *proto.DeviceGroupStats) *DeviceGroupStats { + if in == nil { + return nil + } + + out := &DeviceGroupStats{ + Vendor: in.Vendor, + Type: in.Type, + Name: in.Name, + InstanceStats: make(map[string]*DeviceStats, len(in.InstanceStats)), + } + + for k, v := range in.InstanceStats { + out.InstanceStats[k] = convertProtoDeviceStats(v) + } + + return out +} + +// convertProtoDeviceStats converts between a proto and struct DeviceStats +func convertProtoDeviceStats(in *proto.DeviceStats) *DeviceStats { + if in == nil { + return nil + } + + ts, err := ptypes.Timestamp(in.Timestamp) + if err != nil { + return nil + } + + return &DeviceStats{ + Summary: structs.ConvertProtoStatValue(in.Summary), + Stats: structs.ConvertProtoStatObject(in.Stats), + Timestamp: ts, + } +} + +// convertStructDeviceGroupsStats converts between a list of struct and proto +// DeviceGroupStats +func convertStructDeviceGroupsStats(in []*DeviceGroupStats) []*proto.DeviceGroupStats { + if in == nil { + return nil + } + + out := make([]*proto.DeviceGroupStats, len(in)) + for i, m := range in { + out[i] = convertStructDeviceGroupStats(m) + } + + return out +} + +// convertStructDeviceGroupStats converts between a struct and proto +// DeviceGroupStats +func convertStructDeviceGroupStats(in *DeviceGroupStats) *proto.DeviceGroupStats { + if in == nil { + return nil + } + + out := &proto.DeviceGroupStats{ + Vendor: in.Vendor, + Type: in.Type, + Name: in.Name, + InstanceStats: make(map[string]*proto.DeviceStats, len(in.InstanceStats)), + } + + for k, v := range in.InstanceStats { + out.InstanceStats[k] = convertStructDeviceStats(v) + } + + return out +} + +// convertStructDeviceStats converts between a struct and proto DeviceStats +func convertStructDeviceStats(in *DeviceStats) *proto.DeviceStats { + if in == nil { + return nil + } + + ts, err := ptypes.TimestampProto(in.Timestamp) + if err != nil { + return nil + } + + return &proto.DeviceStats{ + Summary: structs.ConvertStructStatValue(in.Summary), + Stats: structs.ConvertStructStatObject(in.Stats), + Timestamp: ts, + } +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/device/versions.go b/vendor/github.com/hashicorp/nomad/plugins/device/versions.go new file mode 100644 index 0000000..f10057f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/device/versions.go @@ -0,0 +1,6 @@ +package device + +const ( + // ApiVersion010 is the initial API version for the device plugins + ApiVersion010 = "v0.1.0" +) diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/client.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/client.go new file mode 100644 index 0000000..11babf2 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/client.go @@ -0,0 +1,396 @@ +package drivers + +import ( + "context" + "errors" + "io" + "time" + + "github.com/LK4D4/joincontext" + "github.com/golang/protobuf/ptypes" + hclog "github.com/hashicorp/go-hclog" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/pluginutils/grpcutils" + 
"github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/drivers/proto" + "github.com/hashicorp/nomad/plugins/shared/hclspec" + pstructs "github.com/hashicorp/nomad/plugins/shared/structs" + sproto "github.com/hashicorp/nomad/plugins/shared/structs/proto" + "google.golang.org/grpc/status" +) + +var _ DriverPlugin = &driverPluginClient{} + +type driverPluginClient struct { + *base.BasePluginClient + + client proto.DriverClient + logger hclog.Logger + + // doneCtx is closed when the plugin exits + doneCtx context.Context +} + +func (d *driverPluginClient) TaskConfigSchema() (*hclspec.Spec, error) { + req := &proto.TaskConfigSchemaRequest{} + + resp, err := d.client.TaskConfigSchema(d.doneCtx, req) + if err != nil { + return nil, grpcutils.HandleGrpcErr(err, d.doneCtx) + } + + return resp.Spec, nil +} + +func (d *driverPluginClient) Capabilities() (*Capabilities, error) { + req := &proto.CapabilitiesRequest{} + + resp, err := d.client.Capabilities(d.doneCtx, req) + if err != nil { + return nil, grpcutils.HandleGrpcErr(err, d.doneCtx) + } + + caps := &Capabilities{} + if resp.Capabilities != nil { + caps.SendSignals = resp.Capabilities.SendSignals + caps.Exec = resp.Capabilities.Exec + + switch resp.Capabilities.FsIsolation { + case proto.DriverCapabilities_NONE: + caps.FSIsolation = FSIsolationNone + case proto.DriverCapabilities_CHROOT: + caps.FSIsolation = FSIsolationChroot + case proto.DriverCapabilities_IMAGE: + caps.FSIsolation = FSIsolationImage + default: + caps.FSIsolation = FSIsolationNone + } + } + + return caps, nil +} + +// Fingerprint the driver, return a chan that will be pushed to periodically and on changes to health +func (d *driverPluginClient) Fingerprint(ctx context.Context) (<-chan *Fingerprint, error) { + req := &proto.FingerprintRequest{} + + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + stream, err := d.client.Fingerprint(joinedCtx, req) + if err != nil { + return nil, grpcutils.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } + + ch := make(chan *Fingerprint, 1) + go d.handleFingerprint(ctx, ch, stream) + + return ch, nil +} + +func (d *driverPluginClient) handleFingerprint(reqCtx context.Context, ch chan *Fingerprint, stream proto.Driver_FingerprintClient) { + defer close(ch) + for { + pb, err := stream.Recv() + if err != nil { + if err != io.EOF { + ch <- &Fingerprint{ + Err: grpcutils.HandleReqCtxGrpcErr(err, reqCtx, d.doneCtx), + } + } + + // End the stream + return + } + + f := &Fingerprint{ + Attributes: pstructs.ConvertProtoAttributeMap(pb.Attributes), + Health: healthStateFromProto(pb.Health), + HealthDescription: pb.HealthDescription, + } + + select { + case <-reqCtx.Done(): + return + case ch <- f: + } + } +} + +// RecoverTask does internal state recovery to be able to control the task of +// the given TaskHandle +func (d *driverPluginClient) RecoverTask(h *TaskHandle) error { + req := &proto.RecoverTaskRequest{Handle: taskHandleToProto(h)} + + _, err := d.client.RecoverTask(d.doneCtx, req) + return grpcutils.HandleGrpcErr(err, d.doneCtx) +} + +// StartTask starts execution of a task with the given TaskConfig. A TaskHandle +// is returned to the caller that can be used to recover state of the task, +// should the driver crash or exit prematurely. 
+func (d *driverPluginClient) StartTask(c *TaskConfig) (*TaskHandle, *DriverNetwork, error) { + req := &proto.StartTaskRequest{ + Task: taskConfigToProto(c), + } + + resp, err := d.client.StartTask(d.doneCtx, req) + if err != nil { + st := status.Convert(err) + if len(st.Details()) > 0 { + if rec, ok := st.Details()[0].(*sproto.RecoverableError); ok { + return nil, nil, structs.NewRecoverableError(err, rec.Recoverable) + } + } + return nil, nil, grpcutils.HandleGrpcErr(err, d.doneCtx) + } + + var net *DriverNetwork + if resp.NetworkOverride != nil { + net = &DriverNetwork{ + PortMap: map[string]int{}, + IP: resp.NetworkOverride.Addr, + AutoAdvertise: resp.NetworkOverride.AutoAdvertise, + } + for k, v := range resp.NetworkOverride.PortMap { + net.PortMap[k] = int(v) + } + } + + return taskHandleFromProto(resp.Handle), net, nil +} + +// WaitTask returns a channel that will have an ExitResult pushed to it once when the task +// exits on its own or is killed. If WaitTask is called after the task has exited, the channel +// will immedialy return the ExitResult. WaitTask can be called multiple times for +// the same task without issue. +func (d *driverPluginClient) WaitTask(ctx context.Context, id string) (<-chan *ExitResult, error) { + ch := make(chan *ExitResult) + go d.handleWaitTask(ctx, id, ch) + return ch, nil +} + +func (d *driverPluginClient) handleWaitTask(ctx context.Context, id string, ch chan *ExitResult) { + defer close(ch) + var result ExitResult + req := &proto.WaitTaskRequest{ + TaskId: id, + } + + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + resp, err := d.client.WaitTask(joinedCtx, req) + if err != nil { + result.Err = grpcutils.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } else { + result.ExitCode = int(resp.Result.ExitCode) + result.Signal = int(resp.Result.Signal) + result.OOMKilled = resp.Result.OomKilled + if len(resp.Err) > 0 { + result.Err = errors.New(resp.Err) + } + } + ch <- &result +} + +// StopTask stops the task with the given taskID. A timeout and signal can be +// given to control a graceful termination of the task. The driver will send the +// given signal to the task and wait for the given timeout for it to exit. If the +// task does not exit within the timeout it will be forcefully killed. +func (d *driverPluginClient) StopTask(taskID string, timeout time.Duration, signal string) error { + req := &proto.StopTaskRequest{ + TaskId: taskID, + Timeout: ptypes.DurationProto(timeout), + Signal: signal, + } + + _, err := d.client.StopTask(d.doneCtx, req) + return grpcutils.HandleGrpcErr(err, d.doneCtx) +} + +// DestroyTask removes the task from the driver's in memory state. The task +// cannot be running unless force is set to true. If force is set to true the +// driver will forcefully terminate the task before removing it. 
+func (d *driverPluginClient) DestroyTask(taskID string, force bool) error { + req := &proto.DestroyTaskRequest{ + TaskId: taskID, + Force: force, + } + + _, err := d.client.DestroyTask(d.doneCtx, req) + return grpcutils.HandleGrpcErr(err, d.doneCtx) +} + +// InspectTask returns status information for a task +func (d *driverPluginClient) InspectTask(taskID string) (*TaskStatus, error) { + req := &proto.InspectTaskRequest{TaskId: taskID} + + resp, err := d.client.InspectTask(d.doneCtx, req) + if err != nil { + return nil, grpcutils.HandleGrpcErr(err, d.doneCtx) + } + + status, err := taskStatusFromProto(resp.Task) + if err != nil { + return nil, err + } + + if resp.Driver != nil { + status.DriverAttributes = resp.Driver.Attributes + } + if resp.NetworkOverride != nil { + status.NetworkOverride = &DriverNetwork{ + PortMap: map[string]int{}, + IP: resp.NetworkOverride.Addr, + AutoAdvertise: resp.NetworkOverride.AutoAdvertise, + } + for k, v := range resp.NetworkOverride.PortMap { + status.NetworkOverride.PortMap[k] = int(v) + } + } + + return status, nil +} + +// TaskStats returns resource usage statistics for the task +func (d *driverPluginClient) TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *cstructs.TaskResourceUsage, error) { + req := &proto.TaskStatsRequest{ + TaskId: taskID, + CollectionInterval: ptypes.DurationProto(interval), + } + ctx, _ = joincontext.Join(ctx, d.doneCtx) + stream, err := d.client.TaskStats(ctx, req) + if err != nil { + st := status.Convert(err) + if len(st.Details()) > 0 { + if rec, ok := st.Details()[0].(*sproto.RecoverableError); ok { + return nil, structs.NewRecoverableError(err, rec.Recoverable) + } + } + return nil, err + } + + ch := make(chan *cstructs.TaskResourceUsage, 1) + go d.handleStats(ctx, ch, stream) + + return ch, nil +} + +func (d *driverPluginClient) handleStats(ctx context.Context, ch chan<- *cstructs.TaskResourceUsage, stream proto.Driver_TaskStatsClient) { + defer close(ch) + for { + resp, err := stream.Recv() + if ctx.Err() != nil { + // Context canceled; exit gracefully + return + } + + if err != nil { + if err != io.EOF { + d.logger.Error("error receiving stream from TaskStats driver RPC, closing stream", "error", err) + } + + // End of stream + return + } + + stats, err := TaskStatsFromProto(resp.Stats) + if err != nil { + d.logger.Error("failed to decode stats from RPC", "error", err, "stats", resp.Stats) + continue + } + + select { + case ch <- stats: + case <-ctx.Done(): + } + } +} + +// TaskEvents returns a channel that will receive events from the driver about all +// tasks such as lifecycle events, terminal errors, etc. 
+func (d *driverPluginClient) TaskEvents(ctx context.Context) (<-chan *TaskEvent, error) { + req := &proto.TaskEventsRequest{} + + // Join the passed context and the shutdown context + joinedCtx, _ := joincontext.Join(ctx, d.doneCtx) + + stream, err := d.client.TaskEvents(joinedCtx, req) + if err != nil { + return nil, grpcutils.HandleReqCtxGrpcErr(err, ctx, d.doneCtx) + } + + ch := make(chan *TaskEvent, 1) + go d.handleTaskEvents(ctx, ch, stream) + return ch, nil +} + +func (d *driverPluginClient) handleTaskEvents(reqCtx context.Context, ch chan *TaskEvent, stream proto.Driver_TaskEventsClient) { + defer close(ch) + for { + ev, err := stream.Recv() + if err != nil { + if err != io.EOF { + ch <- &TaskEvent{ + Err: grpcutils.HandleReqCtxGrpcErr(err, reqCtx, d.doneCtx), + } + } + + // End the stream + return + } + + timestamp, _ := ptypes.Timestamp(ev.Timestamp) + event := &TaskEvent{ + TaskID: ev.TaskId, + AllocID: ev.AllocId, + TaskName: ev.TaskName, + Annotations: ev.Annotations, + Message: ev.Message, + Timestamp: timestamp, + } + select { + case <-reqCtx.Done(): + return + case ch <- event: + } + } +} + +// SignalTask will send the given signal to the specified task +func (d *driverPluginClient) SignalTask(taskID string, signal string) error { + req := &proto.SignalTaskRequest{ + TaskId: taskID, + Signal: signal, + } + _, err := d.client.SignalTask(d.doneCtx, req) + return grpcutils.HandleGrpcErr(err, d.doneCtx) +} + +// ExecTask will run the given command within the execution context of the task. +// The driver will wait for the given timeout for the command to complete before +// terminating it. The stdout and stderr of the command will be return to the caller, +// along with other exit information such as exit code. +func (d *driverPluginClient) ExecTask(taskID string, cmd []string, timeout time.Duration) (*ExecTaskResult, error) { + req := &proto.ExecTaskRequest{ + TaskId: taskID, + Command: cmd, + Timeout: ptypes.DurationProto(timeout), + } + + resp, err := d.client.ExecTask(d.doneCtx, req) + if err != nil { + return nil, grpcutils.HandleGrpcErr(err, d.doneCtx) + } + + result := &ExecTaskResult{ + Stdout: resp.Stdout, + Stderr: resp.Stderr, + ExitResult: exitResultFromProto(resp.Result), + } + + return result, nil + +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/cstructs.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/cstructs.go new file mode 100644 index 0000000..75d3416 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/cstructs.go @@ -0,0 +1,29 @@ +package drivers + +import ( + cstructs "github.com/hashicorp/nomad/client/structs" +) + +// This files present an indirection layer to client structs used by drivers, +// and represent the public interface for drivers, as client interfaces are +// internal and subject to change. + +// MemoryStats holds memory usage related stats +type MemoryStats = cstructs.MemoryStats + +// CpuStats holds cpu usage related stats +type CpuStats = cstructs.CpuStats + +// ResourceUsage holds information related to cpu and memory stats +type ResourceUsage = cstructs.ResourceUsage + +// TaskResourceUsage holds aggregated resource usage of all processes in a Task +// and the resource usage of the individual pids +type TaskResourceUsage = cstructs.TaskResourceUsage + +// CheckBufSize is the size of the buffer that is used for job output +const CheckBufSize = cstructs.CheckBufSize + +// DriverStatsNotImplemented is the error to be returned if a driver doesn't +// implement stats. 
+var DriverStatsNotImplemented = cstructs.DriverStatsNotImplemented diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/driver.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/driver.go new file mode 100644 index 0000000..0af7264 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/driver.go @@ -0,0 +1,408 @@ +package drivers + +import ( + "context" + "crypto/md5" + "fmt" + "io" + "path/filepath" + "sort" + "strconv" + "time" + + "github.com/hashicorp/nomad/client/allocdir" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/shared/hclspec" + pstructs "github.com/hashicorp/nomad/plugins/shared/structs" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/msgpack" +) + +const ( + // DriverHealthy is the default health description that should be used + // if the driver is nominal + DriverHealthy = "Healthy" + + // Pre09TaskHandleVersion is the version used to identify that the task + // handle is from a driver that existed before driver plugins (v0.9). The + // driver should take appropriate action to handle the old driver state. + Pre09TaskHandleVersion = 0 +) + +// DriverPlugin is the interface with drivers will implement. It is also +// implemented by a plugin client which proxies the calls to go-plugin. See +// the proto/driver.proto file for detailed information about each RPC and +// message structure. +type DriverPlugin interface { + base.BasePlugin + + TaskConfigSchema() (*hclspec.Spec, error) + Capabilities() (*Capabilities, error) + Fingerprint(context.Context) (<-chan *Fingerprint, error) + + RecoverTask(*TaskHandle) error + StartTask(*TaskConfig) (*TaskHandle, *DriverNetwork, error) + WaitTask(ctx context.Context, taskID string) (<-chan *ExitResult, error) + StopTask(taskID string, timeout time.Duration, signal string) error + DestroyTask(taskID string, force bool) error + InspectTask(taskID string) (*TaskStatus, error) + TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *cstructs.TaskResourceUsage, error) + TaskEvents(context.Context) (<-chan *TaskEvent, error) + + SignalTask(taskID string, signal string) error + ExecTask(taskID string, cmd []string, timeout time.Duration) (*ExecTaskResult, error) +} + +// InternalDriverPlugin is an interface that exposes functions that are only +// implemented by internal driver plugins. +type InternalDriverPlugin interface { + // Shutdown allows the plugin to cleanup any running state to avoid leaking + // resources. It should not block. + Shutdown() +} + +// DriverSignalTaskNotSupported can be embedded by drivers which don't support +// the SignalTask RPC. This satisfies the SignalTask func requirement for the +// DriverPlugin interface. +type DriverSignalTaskNotSupported struct{} + +func (DriverSignalTaskNotSupported) SignalTask(taskID, signal string) error { + return fmt.Errorf("SignalTask is not supported by this driver") +} + +// DriverExecTaskNotSupported can be embedded by drivers which don't support +// the ExecTask RPC. This satisfies the ExecTask func requirement of the +// DriverPlugin interface. 
+type DriverExecTaskNotSupported struct{} + +func (_ DriverExecTaskNotSupported) ExecTask(taskID, signal string) error { + return fmt.Errorf("ExecTask is not supported by this driver") +} + +type HealthState string + +var ( + HealthStateUndetected = HealthState("undetected") + HealthStateUnhealthy = HealthState("unhealthy") + HealthStateHealthy = HealthState("healthy") +) + +type Fingerprint struct { + Attributes map[string]*pstructs.Attribute + Health HealthState + HealthDescription string + + // Err is set by the plugin if an error occurred during fingerprinting + Err error +} + +// FSIsolation is an enumeration to describe what kind of filesystem isolation +// a driver supports. +type FSIsolation string + +var ( + // FSIsolationNone means no isolation. The host filesystem is used. + FSIsolationNone = FSIsolation("none") + + // FSIsolationChroot means the driver will use a chroot on the host + // filesystem. + FSIsolationChroot = FSIsolation("chroot") + + // FSIsolationImage means the driver uses an image. + FSIsolationImage = FSIsolation("image") +) + +type Capabilities struct { + // SendSignals marks the driver as being able to send signals + SendSignals bool + + // Exec marks the driver as being able to execute arbitrary commands + // such as health checks. Used by the ScriptExecutor interface. + Exec bool + + //FSIsolation indicates what kind of filesystem isolation the driver supports. + FSIsolation FSIsolation +} + +type TaskConfig struct { + ID string + JobName string + TaskGroupName string + Name string + Env map[string]string + DeviceEnv map[string]string + Resources *Resources + Devices []*DeviceConfig + Mounts []*MountConfig + User string + AllocDir string + rawDriverConfig []byte + StdoutPath string + StderrPath string + AllocID string +} + +func (tc *TaskConfig) Copy() *TaskConfig { + if tc == nil { + return nil + } + c := new(TaskConfig) + *c = *tc + c.Env = helper.CopyMapStringString(c.Env) + c.DeviceEnv = helper.CopyMapStringString(c.DeviceEnv) + c.Resources = tc.Resources.Copy() + + if c.Devices != nil { + dc := make([]*DeviceConfig, len(c.Devices)) + for i, c := range c.Devices { + dc[i] = c.Copy() + } + c.Devices = dc + } + + if c.Mounts != nil { + mc := make([]*MountConfig, len(c.Mounts)) + for i, m := range c.Mounts { + mc[i] = m.Copy() + } + c.Mounts = mc + } + + return c +} + +func (tc *TaskConfig) EnvList() []string { + l := make([]string, 0, len(tc.Env)) + for k, v := range tc.Env { + l = append(l, k+"="+v) + } + + sort.Strings(l) + return l +} + +func (tc *TaskConfig) TaskDir() *allocdir.TaskDir { + taskDir := filepath.Join(tc.AllocDir, tc.Name) + return &allocdir.TaskDir{ + Dir: taskDir, + SharedAllocDir: filepath.Join(tc.AllocDir, allocdir.SharedAllocName), + LogDir: filepath.Join(tc.AllocDir, allocdir.SharedAllocName, allocdir.LogDirName), + SharedTaskDir: filepath.Join(taskDir, allocdir.SharedAllocName), + LocalDir: filepath.Join(taskDir, allocdir.TaskLocal), + SecretsDir: filepath.Join(taskDir, allocdir.TaskSecrets), + } +} + +func (tc *TaskConfig) DecodeDriverConfig(t interface{}) error { + return base.MsgPackDecode(tc.rawDriverConfig, t) +} + +func (tc *TaskConfig) EncodeDriverConfig(val cty.Value) error { + data, err := msgpack.Marshal(val, val.Type()) + if err != nil { + return err + } + + tc.rawDriverConfig = data + return nil +} + +func (tc *TaskConfig) EncodeConcreteDriverConfig(t interface{}) error { + data := []byte{} + err := base.MsgPackEncode(&data, t) + if err != nil { + return err + } + + tc.rawDriverConfig = data + return nil +} + +type 
Resources struct { + NomadResources *structs.AllocatedTaskResources + LinuxResources *LinuxResources +} + +func (r *Resources) Copy() *Resources { + if r == nil { + return nil + } + res := new(Resources) + if r.NomadResources != nil { + res.NomadResources = r.NomadResources.Copy() + } + if r.LinuxResources != nil { + res.LinuxResources = r.LinuxResources.Copy() + } + return res +} + +type LinuxResources struct { + CPUPeriod int64 + CPUQuota int64 + CPUShares int64 + MemoryLimitBytes int64 + OOMScoreAdj int64 + CpusetCPUs string + CpusetMems string + + // PrecentTicks is used to calculate the CPUQuota, currently the docker + // driver exposes cpu period and quota through the driver configuration + // and thus the calculation for CPUQuota cannot be done on the client. + // This is a capatability and should only be used by docker until the docker + // specific options are deprecated in favor of exposes CPUPeriod and + // CPUQuota at the task resource stanza. + PercentTicks float64 +} + +func (r *LinuxResources) Copy() *LinuxResources { + res := new(LinuxResources) + *res = *r + return res +} + +type DeviceConfig struct { + TaskPath string + HostPath string + Permissions string +} + +func (d *DeviceConfig) Copy() *DeviceConfig { + if d == nil { + return nil + } + + dc := new(DeviceConfig) + *dc = *d + return dc +} + +type MountConfig struct { + TaskPath string + HostPath string + Readonly bool +} + +func (m *MountConfig) Copy() *MountConfig { + if m == nil { + return nil + } + + mc := new(MountConfig) + *mc = *m + return mc +} + +const ( + TaskStateUnknown TaskState = "unknown" + TaskStateRunning TaskState = "running" + TaskStateExited TaskState = "exited" +) + +type TaskState string + +type ExitResult struct { + ExitCode int + Signal int + OOMKilled bool + Err error +} + +func (r *ExitResult) Successful() bool { + return r.ExitCode == 0 && r.Signal == 0 && r.Err == nil +} + +func (r *ExitResult) Copy() *ExitResult { + if r == nil { + return nil + } + res := new(ExitResult) + *res = *r + return res +} + +type TaskStatus struct { + ID string + Name string + State TaskState + StartedAt time.Time + CompletedAt time.Time + ExitResult *ExitResult + DriverAttributes map[string]string + NetworkOverride *DriverNetwork +} + +type TaskEvent struct { + TaskID string + TaskName string + AllocID string + Timestamp time.Time + Message string + Annotations map[string]string + + // Err is only used if an error occurred while consuming the RPC stream + Err error +} + +type ExecTaskResult struct { + Stdout []byte + Stderr []byte + ExitResult *ExitResult +} + +// DriverNetwork is the network created by driver's (eg Docker's bridge +// network) during Prestart. +type DriverNetwork struct { + // PortMap can be set by drivers to replace ports in environment + // variables with driver-specific mappings. + PortMap map[string]int + + // IP is the IP address for the task created by the driver. + IP string + + // AutoAdvertise indicates whether the driver thinks services that + // choose to auto-advertise-addresses should use this IP instead of the + // host's. eg If a Docker network plugin is used + AutoAdvertise bool +} + +// Advertise returns true if the driver suggests using the IP set. May be +// called on a nil Network in which case it returns false. +func (d *DriverNetwork) Advertise() bool { + return d != nil && d.AutoAdvertise +} + +// Copy a DriverNetwork struct. If it is nil, nil is returned. 
+func (d *DriverNetwork) Copy() *DriverNetwork { + if d == nil { + return nil + } + pm := make(map[string]int, len(d.PortMap)) + for k, v := range d.PortMap { + pm[k] = v + } + return &DriverNetwork{ + PortMap: pm, + IP: d.IP, + AutoAdvertise: d.AutoAdvertise, + } +} + +// Hash the contents of a DriverNetwork struct to detect changes. If it is nil, +// an empty slice is returned. +func (d *DriverNetwork) Hash() []byte { + if d == nil { + return []byte{} + } + h := md5.New() + io.WriteString(h, d.IP) + io.WriteString(h, strconv.FormatBool(d.AutoAdvertise)) + for k, v := range d.PortMap { + io.WriteString(h, k) + io.WriteString(h, strconv.Itoa(v)) + } + return h.Sum(nil) +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/errors.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/errors.go new file mode 100644 index 0000000..a89ffc6 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/errors.go @@ -0,0 +1,11 @@ +package drivers + +import "fmt" + +var ErrTaskNotFound = fmt.Errorf("task not found for given id") + +var DriverRequiresRootMessage = "Driver must run as root" + +var NoCgroupMountMessage = "Failed to discover cgroup mount point" + +var CgroupMountEmpty = "Cgroup mount point unavailable" diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/plugin.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/plugin.go new file mode 100644 index 0000000..e3e67c3 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/plugin.go @@ -0,0 +1,62 @@ +package drivers + +import ( + "context" + + hclog "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/plugins/base" + baseproto "github.com/hashicorp/nomad/plugins/base/proto" + "github.com/hashicorp/nomad/plugins/drivers/proto" + "google.golang.org/grpc" +) + +var _ plugin.GRPCPlugin = &PluginDriver{} + +// PluginDriver wraps a DriverPlugin and implements go-plugins GRPCPlugin +// interface to expose the the interface over gRPC +type PluginDriver struct { + plugin.NetRPCUnsupportedPlugin + impl DriverPlugin + logger hclog.Logger +} + +func NewDriverPlugin(d DriverPlugin, logger hclog.Logger) *PluginDriver { + return &PluginDriver{ + impl: d, + logger: logger, + } +} + +func (p *PluginDriver) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterDriverServer(s, &driverPluginServer{ + impl: p.impl, + broker: broker, + }) + return nil +} + +func (p *PluginDriver) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &driverPluginClient{ + BasePluginClient: &base.BasePluginClient{ + DoneCtx: ctx, + Client: baseproto.NewBasePluginClient(c), + }, + client: proto.NewDriverClient(c), + doneCtx: ctx, + logger: p.logger, + }, nil +} + +// Serve is used to serve a driverplugin +func Serve(d DriverPlugin, logger hclog.Logger) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: base.Handshake, + Plugins: map[string]plugin.Plugin{ + base.PluginTypeBase: &base.PluginBase{Impl: d}, + base.PluginTypeDriver: &PluginDriver{impl: d, logger: logger}, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go new file mode 100644 index 0000000..c972e4d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.pb.go @@ -0,0 +1,3604 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: plugins/drivers/proto/driver.proto + +package proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import hclspec "github.com/hashicorp/nomad/plugins/shared/hclspec" +import proto1 "github.com/hashicorp/nomad/plugins/shared/structs/proto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type TaskState int32 + +const ( + TaskState_UNKNOWN TaskState = 0 + TaskState_RUNNING TaskState = 1 + TaskState_EXITED TaskState = 2 +) + +var TaskState_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RUNNING", + 2: "EXITED", +} +var TaskState_value = map[string]int32{ + "UNKNOWN": 0, + "RUNNING": 1, + "EXITED": 2, +} + +func (x TaskState) String() string { + return proto.EnumName(TaskState_name, int32(x)) +} +func (TaskState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{0} +} + +type FingerprintResponse_HealthState int32 + +const ( + FingerprintResponse_UNDETECTED FingerprintResponse_HealthState = 0 + FingerprintResponse_UNHEALTHY FingerprintResponse_HealthState = 1 + FingerprintResponse_HEALTHY FingerprintResponse_HealthState = 2 +) + +var FingerprintResponse_HealthState_name = map[int32]string{ + 0: "UNDETECTED", + 1: "UNHEALTHY", + 2: "HEALTHY", +} +var FingerprintResponse_HealthState_value = map[string]int32{ + "UNDETECTED": 0, + "UNHEALTHY": 1, + "HEALTHY": 2, +} + +func (x FingerprintResponse_HealthState) String() string { + return proto.EnumName(FingerprintResponse_HealthState_name, int32(x)) +} +func (FingerprintResponse_HealthState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{5, 0} +} + +type StartTaskResponse_Result int32 + +const ( + StartTaskResponse_SUCCESS StartTaskResponse_Result = 0 + StartTaskResponse_RETRY StartTaskResponse_Result = 1 + StartTaskResponse_FATAL StartTaskResponse_Result = 2 +) + +var StartTaskResponse_Result_name = map[int32]string{ + 0: "SUCCESS", + 1: "RETRY", + 2: "FATAL", +} +var StartTaskResponse_Result_value = map[string]int32{ + "SUCCESS": 0, + "RETRY": 1, + "FATAL": 2, +} + +func (x StartTaskResponse_Result) String() string { + return proto.EnumName(StartTaskResponse_Result_name, int32(x)) +} +func (StartTaskResponse_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{9, 0} +} + +type DriverCapabilities_FSIsolation int32 + +const ( + DriverCapabilities_NONE DriverCapabilities_FSIsolation = 0 + DriverCapabilities_CHROOT DriverCapabilities_FSIsolation = 1 + DriverCapabilities_IMAGE DriverCapabilities_FSIsolation = 2 +) + +var DriverCapabilities_FSIsolation_name = map[int32]string{ + 0: "NONE", + 1: "CHROOT", + 2: "IMAGE", +} +var DriverCapabilities_FSIsolation_value = map[string]int32{ + "NONE": 0, + "CHROOT": 1, + "IMAGE": 2, +} + +func (x DriverCapabilities_FSIsolation) String() string { + return proto.EnumName(DriverCapabilities_FSIsolation_name, int32(x)) +} 
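The generated TaskState enum above (UNKNOWN, RUNNING, EXITED) is the wire-level counterpart of the string-typed TaskStateUnknown/TaskStateRunning/TaskStateExited constants declared in driver.go. A hedged sketch of the kind of switch a caller can use to translate the proto enum back into those strings, written against the generated TaskState type above (the helper name and placement are illustrative, not part of the package):

// taskStateFromProtoSketch maps the generated enum onto the string values
// used by the drivers package ("unknown", "running", "exited").
func taskStateFromProtoSketch(pb TaskState) string {
	switch pb {
	case TaskState_RUNNING:
		return "running" // drivers.TaskStateRunning
	case TaskState_EXITED:
		return "exited" // drivers.TaskStateExited
	default:
		return "unknown" // drivers.TaskStateUnknown
	}
}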
+func (DriverCapabilities_FSIsolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{25, 0} +} + +type CPUUsage_Fields int32 + +const ( + CPUUsage_SYSTEM_MODE CPUUsage_Fields = 0 + CPUUsage_USER_MODE CPUUsage_Fields = 1 + CPUUsage_TOTAL_TICKS CPUUsage_Fields = 2 + CPUUsage_THROTTLED_PERIODS CPUUsage_Fields = 3 + CPUUsage_THROTTLED_TIME CPUUsage_Fields = 4 + CPUUsage_PERCENT CPUUsage_Fields = 5 +) + +var CPUUsage_Fields_name = map[int32]string{ + 0: "SYSTEM_MODE", + 1: "USER_MODE", + 2: "TOTAL_TICKS", + 3: "THROTTLED_PERIODS", + 4: "THROTTLED_TIME", + 5: "PERCENT", +} +var CPUUsage_Fields_value = map[string]int32{ + "SYSTEM_MODE": 0, + "USER_MODE": 1, + "TOTAL_TICKS": 2, + "THROTTLED_PERIODS": 3, + "THROTTLED_TIME": 4, + "PERCENT": 5, +} + +func (x CPUUsage_Fields) String() string { + return proto.EnumName(CPUUsage_Fields_name, int32(x)) +} +func (CPUUsage_Fields) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{43, 0} +} + +type MemoryUsage_Fields int32 + +const ( + MemoryUsage_RSS MemoryUsage_Fields = 0 + MemoryUsage_CACHE MemoryUsage_Fields = 1 + MemoryUsage_MAX_USAGE MemoryUsage_Fields = 2 + MemoryUsage_KERNEL_USAGE MemoryUsage_Fields = 3 + MemoryUsage_KERNEL_MAX_USAGE MemoryUsage_Fields = 4 + MemoryUsage_USAGE MemoryUsage_Fields = 5 + MemoryUsage_SWAP MemoryUsage_Fields = 6 +) + +var MemoryUsage_Fields_name = map[int32]string{ + 0: "RSS", + 1: "CACHE", + 2: "MAX_USAGE", + 3: "KERNEL_USAGE", + 4: "KERNEL_MAX_USAGE", + 5: "USAGE", + 6: "SWAP", +} +var MemoryUsage_Fields_value = map[string]int32{ + "RSS": 0, + "CACHE": 1, + "MAX_USAGE": 2, + "KERNEL_USAGE": 3, + "KERNEL_MAX_USAGE": 4, + "USAGE": 5, + "SWAP": 6, +} + +func (x MemoryUsage_Fields) String() string { + return proto.EnumName(MemoryUsage_Fields_name, int32(x)) +} +func (MemoryUsage_Fields) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{44, 0} +} + +type TaskConfigSchemaRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskConfigSchemaRequest) Reset() { *m = TaskConfigSchemaRequest{} } +func (m *TaskConfigSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*TaskConfigSchemaRequest) ProtoMessage() {} +func (*TaskConfigSchemaRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{0} +} +func (m *TaskConfigSchemaRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskConfigSchemaRequest.Unmarshal(m, b) +} +func (m *TaskConfigSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskConfigSchemaRequest.Marshal(b, m, deterministic) +} +func (dst *TaskConfigSchemaRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskConfigSchemaRequest.Merge(dst, src) +} +func (m *TaskConfigSchemaRequest) XXX_Size() int { + return xxx_messageInfo_TaskConfigSchemaRequest.Size(m) +} +func (m *TaskConfigSchemaRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskConfigSchemaRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskConfigSchemaRequest proto.InternalMessageInfo + +type TaskConfigSchemaResponse struct { + // Spec is the configuration schema for the job driver config stanza + Spec *hclspec.Spec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*TaskConfigSchemaResponse) Reset() { *m = TaskConfigSchemaResponse{} } +func (m *TaskConfigSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*TaskConfigSchemaResponse) ProtoMessage() {} +func (*TaskConfigSchemaResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{1} +} +func (m *TaskConfigSchemaResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskConfigSchemaResponse.Unmarshal(m, b) +} +func (m *TaskConfigSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskConfigSchemaResponse.Marshal(b, m, deterministic) +} +func (dst *TaskConfigSchemaResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskConfigSchemaResponse.Merge(dst, src) +} +func (m *TaskConfigSchemaResponse) XXX_Size() int { + return xxx_messageInfo_TaskConfigSchemaResponse.Size(m) +} +func (m *TaskConfigSchemaResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskConfigSchemaResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskConfigSchemaResponse proto.InternalMessageInfo + +func (m *TaskConfigSchemaResponse) GetSpec() *hclspec.Spec { + if m != nil { + return m.Spec + } + return nil +} + +type CapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapabilitiesRequest) Reset() { *m = CapabilitiesRequest{} } +func (m *CapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*CapabilitiesRequest) ProtoMessage() {} +func (*CapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{2} +} +func (m *CapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapabilitiesRequest.Unmarshal(m, b) +} +func (m *CapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapabilitiesRequest.Marshal(b, m, deterministic) +} +func (dst *CapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapabilitiesRequest.Merge(dst, src) +} +func (m *CapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_CapabilitiesRequest.Size(m) +} +func (m *CapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CapabilitiesRequest proto.InternalMessageInfo + +type CapabilitiesResponse struct { + // Capabilities provides a way for the driver to denote if it implements + // non-core RPCs. Some Driver service RPCs expose additional information + // or functionality outside of the core task management functions. These + // RPCs are only implemented if the driver sets the corresponding capability. 
+ Capabilities *DriverCapabilities `protobuf:"bytes,1,opt,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapabilitiesResponse) Reset() { *m = CapabilitiesResponse{} } +func (m *CapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*CapabilitiesResponse) ProtoMessage() {} +func (*CapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{3} +} +func (m *CapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapabilitiesResponse.Unmarshal(m, b) +} +func (m *CapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapabilitiesResponse.Marshal(b, m, deterministic) +} +func (dst *CapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapabilitiesResponse.Merge(dst, src) +} +func (m *CapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_CapabilitiesResponse.Size(m) +} +func (m *CapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CapabilitiesResponse proto.InternalMessageInfo + +func (m *CapabilitiesResponse) GetCapabilities() *DriverCapabilities { + if m != nil { + return m.Capabilities + } + return nil +} + +type FingerprintRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FingerprintRequest) Reset() { *m = FingerprintRequest{} } +func (m *FingerprintRequest) String() string { return proto.CompactTextString(m) } +func (*FingerprintRequest) ProtoMessage() {} +func (*FingerprintRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{4} +} +func (m *FingerprintRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FingerprintRequest.Unmarshal(m, b) +} +func (m *FingerprintRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FingerprintRequest.Marshal(b, m, deterministic) +} +func (dst *FingerprintRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FingerprintRequest.Merge(dst, src) +} +func (m *FingerprintRequest) XXX_Size() int { + return xxx_messageInfo_FingerprintRequest.Size(m) +} +func (m *FingerprintRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FingerprintRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FingerprintRequest proto.InternalMessageInfo + +type FingerprintResponse struct { + // Attributes are key/value pairs that annotate the nomad client and can be + // used in scheduling contraints and affinities. + Attributes map[string]*proto1.Attribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Health is used to determine the state of the health the driver is in. 
+ // Health can be one of the following states: + // * UNDETECTED: driver dependencies are not met and the driver can not start + // * UNHEALTHY: driver dependencies are met but the driver is unable to + // perform operations due to some other problem + // * HEALTHY: driver is able to perform all operations + Health FingerprintResponse_HealthState `protobuf:"varint,2,opt,name=health,proto3,enum=hashicorp.nomad.plugins.drivers.proto.FingerprintResponse_HealthState" json:"health,omitempty"` + // HealthDescription is a human readable message describing the current + // state of driver health + HealthDescription string `protobuf:"bytes,3,opt,name=health_description,json=healthDescription,proto3" json:"health_description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FingerprintResponse) Reset() { *m = FingerprintResponse{} } +func (m *FingerprintResponse) String() string { return proto.CompactTextString(m) } +func (*FingerprintResponse) ProtoMessage() {} +func (*FingerprintResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{5} +} +func (m *FingerprintResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FingerprintResponse.Unmarshal(m, b) +} +func (m *FingerprintResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FingerprintResponse.Marshal(b, m, deterministic) +} +func (dst *FingerprintResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FingerprintResponse.Merge(dst, src) +} +func (m *FingerprintResponse) XXX_Size() int { + return xxx_messageInfo_FingerprintResponse.Size(m) +} +func (m *FingerprintResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FingerprintResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FingerprintResponse proto.InternalMessageInfo + +func (m *FingerprintResponse) GetAttributes() map[string]*proto1.Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *FingerprintResponse) GetHealth() FingerprintResponse_HealthState { + if m != nil { + return m.Health + } + return FingerprintResponse_UNDETECTED +} + +func (m *FingerprintResponse) GetHealthDescription() string { + if m != nil { + return m.HealthDescription + } + return "" +} + +type RecoverTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Handle is the TaskHandle returned from StartTask + Handle *TaskHandle `protobuf:"bytes,2,opt,name=handle,proto3" json:"handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecoverTaskRequest) Reset() { *m = RecoverTaskRequest{} } +func (m *RecoverTaskRequest) String() string { return proto.CompactTextString(m) } +func (*RecoverTaskRequest) ProtoMessage() {} +func (*RecoverTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{6} +} +func (m *RecoverTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecoverTaskRequest.Unmarshal(m, b) +} +func (m *RecoverTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecoverTaskRequest.Marshal(b, m, deterministic) +} +func (dst *RecoverTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecoverTaskRequest.Merge(dst, src) +} +func (m *RecoverTaskRequest) XXX_Size() int { + return 
xxx_messageInfo_RecoverTaskRequest.Size(m) +} +func (m *RecoverTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RecoverTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RecoverTaskRequest proto.InternalMessageInfo + +func (m *RecoverTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *RecoverTaskRequest) GetHandle() *TaskHandle { + if m != nil { + return m.Handle + } + return nil +} + +type RecoverTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecoverTaskResponse) Reset() { *m = RecoverTaskResponse{} } +func (m *RecoverTaskResponse) String() string { return proto.CompactTextString(m) } +func (*RecoverTaskResponse) ProtoMessage() {} +func (*RecoverTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{7} +} +func (m *RecoverTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecoverTaskResponse.Unmarshal(m, b) +} +func (m *RecoverTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecoverTaskResponse.Marshal(b, m, deterministic) +} +func (dst *RecoverTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecoverTaskResponse.Merge(dst, src) +} +func (m *RecoverTaskResponse) XXX_Size() int { + return xxx_messageInfo_RecoverTaskResponse.Size(m) +} +func (m *RecoverTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RecoverTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RecoverTaskResponse proto.InternalMessageInfo + +type StartTaskRequest struct { + // Task configuration to launch + Task *TaskConfig `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartTaskRequest) Reset() { *m = StartTaskRequest{} } +func (m *StartTaskRequest) String() string { return proto.CompactTextString(m) } +func (*StartTaskRequest) ProtoMessage() {} +func (*StartTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{8} +} +func (m *StartTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartTaskRequest.Unmarshal(m, b) +} +func (m *StartTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartTaskRequest.Marshal(b, m, deterministic) +} +func (dst *StartTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartTaskRequest.Merge(dst, src) +} +func (m *StartTaskRequest) XXX_Size() int { + return xxx_messageInfo_StartTaskRequest.Size(m) +} +func (m *StartTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartTaskRequest proto.InternalMessageInfo + +func (m *StartTaskRequest) GetTask() *TaskConfig { + if m != nil { + return m.Task + } + return nil +} + +type StartTaskResponse struct { + // Result is set depending on the type of error that occurred while starting + // a task: + // + // * SUCCESS: No error occurred, handle is set + // * RETRY: An error occurred, but is recoverable and the RPC should be retried + // * FATAL: A fatal error occurred and is not likely to succeed if retried + // + // If Result is not successful, the DriverErrorMsg will be set. 
+ Result StartTaskResponse_Result `protobuf:"varint,1,opt,name=result,proto3,enum=hashicorp.nomad.plugins.drivers.proto.StartTaskResponse_Result" json:"result,omitempty"` + // DriverErrorMsg is set if an error occurred + DriverErrorMsg string `protobuf:"bytes,2,opt,name=driver_error_msg,json=driverErrorMsg,proto3" json:"driver_error_msg,omitempty"` + // Handle is opaque to the client, but must be stored in order to recover + // the task. + Handle *TaskHandle `protobuf:"bytes,3,opt,name=handle,proto3" json:"handle,omitempty"` + // NetworkOverride is set if the driver sets network settings and the service ip/port + // needs to be set differently. + NetworkOverride *NetworkOverride `protobuf:"bytes,4,opt,name=network_override,json=networkOverride,proto3" json:"network_override,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartTaskResponse) Reset() { *m = StartTaskResponse{} } +func (m *StartTaskResponse) String() string { return proto.CompactTextString(m) } +func (*StartTaskResponse) ProtoMessage() {} +func (*StartTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{9} +} +func (m *StartTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartTaskResponse.Unmarshal(m, b) +} +func (m *StartTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartTaskResponse.Marshal(b, m, deterministic) +} +func (dst *StartTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartTaskResponse.Merge(dst, src) +} +func (m *StartTaskResponse) XXX_Size() int { + return xxx_messageInfo_StartTaskResponse.Size(m) +} +func (m *StartTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartTaskResponse proto.InternalMessageInfo + +func (m *StartTaskResponse) GetResult() StartTaskResponse_Result { + if m != nil { + return m.Result + } + return StartTaskResponse_SUCCESS +} + +func (m *StartTaskResponse) GetDriverErrorMsg() string { + if m != nil { + return m.DriverErrorMsg + } + return "" +} + +func (m *StartTaskResponse) GetHandle() *TaskHandle { + if m != nil { + return m.Handle + } + return nil +} + +func (m *StartTaskResponse) GetNetworkOverride() *NetworkOverride { + if m != nil { + return m.NetworkOverride + } + return nil +} + +type WaitTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitTaskRequest) Reset() { *m = WaitTaskRequest{} } +func (m *WaitTaskRequest) String() string { return proto.CompactTextString(m) } +func (*WaitTaskRequest) ProtoMessage() {} +func (*WaitTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{10} +} +func (m *WaitTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WaitTaskRequest.Unmarshal(m, b) +} +func (m *WaitTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WaitTaskRequest.Marshal(b, m, deterministic) +} +func (dst *WaitTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitTaskRequest.Merge(dst, src) +} +func (m *WaitTaskRequest) XXX_Size() int { + return xxx_messageInfo_WaitTaskRequest.Size(m) +} +func (m *WaitTaskRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_WaitTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WaitTaskRequest proto.InternalMessageInfo + +func (m *WaitTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +type WaitTaskResponse struct { + // Result is the exit status of the task + Result *ExitResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + // Err is set if any driver error occurred while waiting for the task + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WaitTaskResponse) Reset() { *m = WaitTaskResponse{} } +func (m *WaitTaskResponse) String() string { return proto.CompactTextString(m) } +func (*WaitTaskResponse) ProtoMessage() {} +func (*WaitTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{11} +} +func (m *WaitTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WaitTaskResponse.Unmarshal(m, b) +} +func (m *WaitTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WaitTaskResponse.Marshal(b, m, deterministic) +} +func (dst *WaitTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WaitTaskResponse.Merge(dst, src) +} +func (m *WaitTaskResponse) XXX_Size() int { + return xxx_messageInfo_WaitTaskResponse.Size(m) +} +func (m *WaitTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WaitTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WaitTaskResponse proto.InternalMessageInfo + +func (m *WaitTaskResponse) GetResult() *ExitResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *WaitTaskResponse) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + +type StopTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Timeout defines the amount of time to wait before forcefully killing + // the task. For example, on Unix clients, this means sending a SIGKILL to + // the process. 
+ Timeout *duration.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Signal can be set to override the Task's configured shutdown signal + Signal string `protobuf:"bytes,3,opt,name=signal,proto3" json:"signal,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopTaskRequest) Reset() { *m = StopTaskRequest{} } +func (m *StopTaskRequest) String() string { return proto.CompactTextString(m) } +func (*StopTaskRequest) ProtoMessage() {} +func (*StopTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{12} +} +func (m *StopTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopTaskRequest.Unmarshal(m, b) +} +func (m *StopTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopTaskRequest.Marshal(b, m, deterministic) +} +func (dst *StopTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopTaskRequest.Merge(dst, src) +} +func (m *StopTaskRequest) XXX_Size() int { + return xxx_messageInfo_StopTaskRequest.Size(m) +} +func (m *StopTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopTaskRequest proto.InternalMessageInfo + +func (m *StopTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *StopTaskRequest) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *StopTaskRequest) GetSignal() string { + if m != nil { + return m.Signal + } + return "" +} + +type StopTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopTaskResponse) Reset() { *m = StopTaskResponse{} } +func (m *StopTaskResponse) String() string { return proto.CompactTextString(m) } +func (*StopTaskResponse) ProtoMessage() {} +func (*StopTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{13} +} +func (m *StopTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopTaskResponse.Unmarshal(m, b) +} +func (m *StopTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopTaskResponse.Marshal(b, m, deterministic) +} +func (dst *StopTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopTaskResponse.Merge(dst, src) +} +func (m *StopTaskResponse) XXX_Size() int { + return xxx_messageInfo_StopTaskResponse.Size(m) +} +func (m *StopTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopTaskResponse proto.InternalMessageInfo + +type DestroyTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Force destroys the task even if it is still in a running state + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DestroyTaskRequest) Reset() { *m = DestroyTaskRequest{} } +func (m *DestroyTaskRequest) String() string { return proto.CompactTextString(m) } +func (*DestroyTaskRequest) ProtoMessage() {} +func (*DestroyTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{14} +} 
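StopTaskRequest and DestroyTaskRequest above carry the shutdown parameters described by the DriverPlugin interface earlier in this diff: StopTask sends the given (or configured) signal, waits up to Timeout, then force-kills the task, and DestroyTask removes the task from the driver's in-memory state, which requires force=true if the task is still running. A minimal caller-side sketch against that interface; the import path matches the vendored package, but the function and variable names are illustrative:

package example

import (
	"log"
	"time"

	"github.com/hashicorp/nomad/plugins/drivers"
)

// stopAndDestroy shows the usual shutdown sequence: ask the driver to stop
// the task gracefully, then remove its state once it has exited.
func stopAndDestroy(d drivers.DriverPlugin, taskID string) {
	// Send SIGTERM and allow up to 5 seconds before the driver force-kills.
	if err := d.StopTask(taskID, 5*time.Second, "SIGTERM"); err != nil {
		log.Printf("stop task %s: %v", taskID, err)
	}
	// force=false: DestroyTask only succeeds once the task is no longer running.
	if err := d.DestroyTask(taskID, false); err != nil {
		log.Printf("destroy task %s: %v", taskID, err)
	}
}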
+func (m *DestroyTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DestroyTaskRequest.Unmarshal(m, b) +} +func (m *DestroyTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DestroyTaskRequest.Marshal(b, m, deterministic) +} +func (dst *DestroyTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DestroyTaskRequest.Merge(dst, src) +} +func (m *DestroyTaskRequest) XXX_Size() int { + return xxx_messageInfo_DestroyTaskRequest.Size(m) +} +func (m *DestroyTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DestroyTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DestroyTaskRequest proto.InternalMessageInfo + +func (m *DestroyTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *DestroyTaskRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +type DestroyTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DestroyTaskResponse) Reset() { *m = DestroyTaskResponse{} } +func (m *DestroyTaskResponse) String() string { return proto.CompactTextString(m) } +func (*DestroyTaskResponse) ProtoMessage() {} +func (*DestroyTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{15} +} +func (m *DestroyTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DestroyTaskResponse.Unmarshal(m, b) +} +func (m *DestroyTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DestroyTaskResponse.Marshal(b, m, deterministic) +} +func (dst *DestroyTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DestroyTaskResponse.Merge(dst, src) +} +func (m *DestroyTaskResponse) XXX_Size() int { + return xxx_messageInfo_DestroyTaskResponse.Size(m) +} +func (m *DestroyTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DestroyTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DestroyTaskResponse proto.InternalMessageInfo + +type InspectTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InspectTaskRequest) Reset() { *m = InspectTaskRequest{} } +func (m *InspectTaskRequest) String() string { return proto.CompactTextString(m) } +func (*InspectTaskRequest) ProtoMessage() {} +func (*InspectTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{16} +} +func (m *InspectTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InspectTaskRequest.Unmarshal(m, b) +} +func (m *InspectTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InspectTaskRequest.Marshal(b, m, deterministic) +} +func (dst *InspectTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InspectTaskRequest.Merge(dst, src) +} +func (m *InspectTaskRequest) XXX_Size() int { + return xxx_messageInfo_InspectTaskRequest.Size(m) +} +func (m *InspectTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InspectTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InspectTaskRequest proto.InternalMessageInfo + +func (m *InspectTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +type InspectTaskResponse struct { + // Task details + Task 
*TaskStatus `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + // Driver details for task + Driver *TaskDriverStatus `protobuf:"bytes,2,opt,name=driver,proto3" json:"driver,omitempty"` + // NetworkOverride info if set + NetworkOverride *NetworkOverride `protobuf:"bytes,3,opt,name=network_override,json=networkOverride,proto3" json:"network_override,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InspectTaskResponse) Reset() { *m = InspectTaskResponse{} } +func (m *InspectTaskResponse) String() string { return proto.CompactTextString(m) } +func (*InspectTaskResponse) ProtoMessage() {} +func (*InspectTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{17} +} +func (m *InspectTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InspectTaskResponse.Unmarshal(m, b) +} +func (m *InspectTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InspectTaskResponse.Marshal(b, m, deterministic) +} +func (dst *InspectTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InspectTaskResponse.Merge(dst, src) +} +func (m *InspectTaskResponse) XXX_Size() int { + return xxx_messageInfo_InspectTaskResponse.Size(m) +} +func (m *InspectTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InspectTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InspectTaskResponse proto.InternalMessageInfo + +func (m *InspectTaskResponse) GetTask() *TaskStatus { + if m != nil { + return m.Task + } + return nil +} + +func (m *InspectTaskResponse) GetDriver() *TaskDriverStatus { + if m != nil { + return m.Driver + } + return nil +} + +func (m *InspectTaskResponse) GetNetworkOverride() *NetworkOverride { + if m != nil { + return m.NetworkOverride + } + return nil +} + +type TaskStatsRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // CollectionInterval is the interval at which to stream stats to the caller + CollectionInterval *duration.Duration `protobuf:"bytes,2,opt,name=collection_interval,json=collectionInterval,proto3" json:"collection_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskStatsRequest) Reset() { *m = TaskStatsRequest{} } +func (m *TaskStatsRequest) String() string { return proto.CompactTextString(m) } +func (*TaskStatsRequest) ProtoMessage() {} +func (*TaskStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{18} +} +func (m *TaskStatsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskStatsRequest.Unmarshal(m, b) +} +func (m *TaskStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskStatsRequest.Marshal(b, m, deterministic) +} +func (dst *TaskStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStatsRequest.Merge(dst, src) +} +func (m *TaskStatsRequest) XXX_Size() int { + return xxx_messageInfo_TaskStatsRequest.Size(m) +} +func (m *TaskStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStatsRequest proto.InternalMessageInfo + +func (m *TaskStatsRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *TaskStatsRequest) GetCollectionInterval() 
*duration.Duration { + if m != nil { + return m.CollectionInterval + } + return nil +} + +type TaskStatsResponse struct { + // Stats for the task + Stats *TaskStats `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskStatsResponse) Reset() { *m = TaskStatsResponse{} } +func (m *TaskStatsResponse) String() string { return proto.CompactTextString(m) } +func (*TaskStatsResponse) ProtoMessage() {} +func (*TaskStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{19} +} +func (m *TaskStatsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskStatsResponse.Unmarshal(m, b) +} +func (m *TaskStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskStatsResponse.Marshal(b, m, deterministic) +} +func (dst *TaskStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStatsResponse.Merge(dst, src) +} +func (m *TaskStatsResponse) XXX_Size() int { + return xxx_messageInfo_TaskStatsResponse.Size(m) +} +func (m *TaskStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStatsResponse proto.InternalMessageInfo + +func (m *TaskStatsResponse) GetStats() *TaskStats { + if m != nil { + return m.Stats + } + return nil +} + +type TaskEventsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskEventsRequest) Reset() { *m = TaskEventsRequest{} } +func (m *TaskEventsRequest) String() string { return proto.CompactTextString(m) } +func (*TaskEventsRequest) ProtoMessage() {} +func (*TaskEventsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{20} +} +func (m *TaskEventsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskEventsRequest.Unmarshal(m, b) +} +func (m *TaskEventsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskEventsRequest.Marshal(b, m, deterministic) +} +func (dst *TaskEventsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskEventsRequest.Merge(dst, src) +} +func (m *TaskEventsRequest) XXX_Size() int { + return xxx_messageInfo_TaskEventsRequest.Size(m) +} +func (m *TaskEventsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TaskEventsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskEventsRequest proto.InternalMessageInfo + +type SignalTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Signal is the operating system signal to send to the task. 
Ex: SIGHUP + Signal string `protobuf:"bytes,2,opt,name=signal,proto3" json:"signal,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignalTaskRequest) Reset() { *m = SignalTaskRequest{} } +func (m *SignalTaskRequest) String() string { return proto.CompactTextString(m) } +func (*SignalTaskRequest) ProtoMessage() {} +func (*SignalTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{21} +} +func (m *SignalTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignalTaskRequest.Unmarshal(m, b) +} +func (m *SignalTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignalTaskRequest.Marshal(b, m, deterministic) +} +func (dst *SignalTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignalTaskRequest.Merge(dst, src) +} +func (m *SignalTaskRequest) XXX_Size() int { + return xxx_messageInfo_SignalTaskRequest.Size(m) +} +func (m *SignalTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignalTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignalTaskRequest proto.InternalMessageInfo + +func (m *SignalTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *SignalTaskRequest) GetSignal() string { + if m != nil { + return m.Signal + } + return "" +} + +type SignalTaskResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignalTaskResponse) Reset() { *m = SignalTaskResponse{} } +func (m *SignalTaskResponse) String() string { return proto.CompactTextString(m) } +func (*SignalTaskResponse) ProtoMessage() {} +func (*SignalTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{22} +} +func (m *SignalTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignalTaskResponse.Unmarshal(m, b) +} +func (m *SignalTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignalTaskResponse.Marshal(b, m, deterministic) +} +func (dst *SignalTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignalTaskResponse.Merge(dst, src) +} +func (m *SignalTaskResponse) XXX_Size() int { + return xxx_messageInfo_SignalTaskResponse.Size(m) +} +func (m *SignalTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignalTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignalTaskResponse proto.InternalMessageInfo + +type ExecTaskRequest struct { + // TaskId is the ID of the target task + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Command is the command to execute in the task environment + Command []string `protobuf:"bytes,2,rep,name=command,proto3" json:"command,omitempty"` + // Timeout is the amount of time to wait for the command to stop. 
+ // Defaults to 0 (run forever) + Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskRequest) Reset() { *m = ExecTaskRequest{} } +func (m *ExecTaskRequest) String() string { return proto.CompactTextString(m) } +func (*ExecTaskRequest) ProtoMessage() {} +func (*ExecTaskRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{23} +} +func (m *ExecTaskRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskRequest.Unmarshal(m, b) +} +func (m *ExecTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskRequest.Marshal(b, m, deterministic) +} +func (dst *ExecTaskRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskRequest.Merge(dst, src) +} +func (m *ExecTaskRequest) XXX_Size() int { + return xxx_messageInfo_ExecTaskRequest.Size(m) +} +func (m *ExecTaskRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskRequest proto.InternalMessageInfo + +func (m *ExecTaskRequest) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *ExecTaskRequest) GetCommand() []string { + if m != nil { + return m.Command + } + return nil +} + +func (m *ExecTaskRequest) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +type ExecTaskResponse struct { + // Stdout from the exec + Stdout []byte `protobuf:"bytes,1,opt,name=stdout,proto3" json:"stdout,omitempty"` + // Stderr from the exec + Stderr []byte `protobuf:"bytes,2,opt,name=stderr,proto3" json:"stderr,omitempty"` + // Result from the exec + Result *ExitResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecTaskResponse) Reset() { *m = ExecTaskResponse{} } +func (m *ExecTaskResponse) String() string { return proto.CompactTextString(m) } +func (*ExecTaskResponse) ProtoMessage() {} +func (*ExecTaskResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{24} +} +func (m *ExecTaskResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExecTaskResponse.Unmarshal(m, b) +} +func (m *ExecTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExecTaskResponse.Marshal(b, m, deterministic) +} +func (dst *ExecTaskResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTaskResponse.Merge(dst, src) +} +func (m *ExecTaskResponse) XXX_Size() int { + return xxx_messageInfo_ExecTaskResponse.Size(m) +} +func (m *ExecTaskResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTaskResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecTaskResponse proto.InternalMessageInfo + +func (m *ExecTaskResponse) GetStdout() []byte { + if m != nil { + return m.Stdout + } + return nil +} + +func (m *ExecTaskResponse) GetStderr() []byte { + if m != nil { + return m.Stderr + } + return nil +} + +func (m *ExecTaskResponse) GetResult() *ExitResult { + if m != nil { + return m.Result + } + return nil +} + +type DriverCapabilities struct { + // SendSignals indicates that the driver can send process signals (ex. SIGUSR1) + // to the task. 
+ SendSignals bool `protobuf:"varint,1,opt,name=send_signals,json=sendSignals,proto3" json:"send_signals,omitempty"`
+ // Exec indicates that the driver supports executing arbitrary commands
+ // in the task's execution environment.
+ Exec bool `protobuf:"varint,2,opt,name=exec,proto3" json:"exec,omitempty"`
+ // FsIsolation indicates what kind of filesystem isolation a driver supports.
+ FsIsolation DriverCapabilities_FSIsolation `protobuf:"varint,3,opt,name=fs_isolation,json=fsIsolation,proto3,enum=hashicorp.nomad.plugins.drivers.proto.DriverCapabilities_FSIsolation" json:"fs_isolation,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DriverCapabilities) Reset() { *m = DriverCapabilities{} }
+func (m *DriverCapabilities) String() string { return proto.CompactTextString(m) }
+func (*DriverCapabilities) ProtoMessage() {}
+func (*DriverCapabilities) Descriptor() ([]byte, []int) {
+ return fileDescriptor_driver_8c4ef0bcc46739f6, []int{25}
+}
+func (m *DriverCapabilities) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DriverCapabilities.Unmarshal(m, b)
+}
+func (m *DriverCapabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DriverCapabilities.Marshal(b, m, deterministic)
+}
+func (dst *DriverCapabilities) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DriverCapabilities.Merge(dst, src)
+}
+func (m *DriverCapabilities) XXX_Size() int {
+ return xxx_messageInfo_DriverCapabilities.Size(m)
+}
+func (m *DriverCapabilities) XXX_DiscardUnknown() {
+ xxx_messageInfo_DriverCapabilities.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DriverCapabilities proto.InternalMessageInfo
+
+func (m *DriverCapabilities) GetSendSignals() bool {
+ if m != nil {
+ return m.SendSignals
+ }
+ return false
+}
+
+func (m *DriverCapabilities) GetExec() bool {
+ if m != nil {
+ return m.Exec
+ }
+ return false
+}
+
+func (m *DriverCapabilities) GetFsIsolation() DriverCapabilities_FSIsolation {
+ if m != nil {
+ return m.FsIsolation
+ }
+ return DriverCapabilities_NONE
+}
+
+type TaskConfig struct {
+ // Id of the task, recommended to be globally unique; must be unique to the driver.
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Name of the task
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // MsgpackDriverConfig is the encoded driver configuration of the task
+ MsgpackDriverConfig []byte `protobuf:"bytes,3,opt,name=msgpack_driver_config,json=msgpackDriverConfig,proto3" json:"msgpack_driver_config,omitempty"`
+ // Env is the set of key/value pairs to be set as environment variables
+ Env map[string]string `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // DeviceEnv is the set of environment variables that are defined by device
+ // plugins. This allows the driver to differentiate environment variables
+ // set by the device plugins and those by the user. When populating the
+ // task's environment, env should be used.
+ DeviceEnv map[string]string `protobuf:"bytes,5,rep,name=device_env,json=deviceEnv,proto3" json:"device_env,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Resources defines the resources to isolate + Resources *Resources `protobuf:"bytes,6,opt,name=resources,proto3" json:"resources,omitempty"` + // Mounts is a list of targets to bind mount into the task directory + Mounts []*Mount `protobuf:"bytes,7,rep,name=mounts,proto3" json:"mounts,omitempty"` + // Devices is a list of system devices to mount into the task's execution + // environment. + Devices []*Device `protobuf:"bytes,8,rep,name=devices,proto3" json:"devices,omitempty"` + // User defines the operating system user the tasks should run as + User string `protobuf:"bytes,9,opt,name=user,proto3" json:"user,omitempty"` + // AllocDir is the directory on the host where the allocation directory + // exists. + AllocDir string `protobuf:"bytes,10,opt,name=alloc_dir,json=allocDir,proto3" json:"alloc_dir,omitempty"` + // StdoutPath is the path to the file to open and write task stdout to + StdoutPath string `protobuf:"bytes,11,opt,name=stdout_path,json=stdoutPath,proto3" json:"stdout_path,omitempty"` + // StderrPath is the path to the file to open and write task stderr to + StderrPath string `protobuf:"bytes,12,opt,name=stderr_path,json=stderrPath,proto3" json:"stderr_path,omitempty"` + // TaskGroupName is the name of the task group which this task is a member of + TaskGroupName string `protobuf:"bytes,13,opt,name=task_group_name,json=taskGroupName,proto3" json:"task_group_name,omitempty"` + // JobName is the name of the job of which this task is part of + JobName string `protobuf:"bytes,14,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` + // AllocId is the ID of the associated allocation + AllocId string `protobuf:"bytes,15,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskConfig) Reset() { *m = TaskConfig{} } +func (m *TaskConfig) String() string { return proto.CompactTextString(m) } +func (*TaskConfig) ProtoMessage() {} +func (*TaskConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{26} +} +func (m *TaskConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskConfig.Unmarshal(m, b) +} +func (m *TaskConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskConfig.Marshal(b, m, deterministic) +} +func (dst *TaskConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskConfig.Merge(dst, src) +} +func (m *TaskConfig) XXX_Size() int { + return xxx_messageInfo_TaskConfig.Size(m) +} +func (m *TaskConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TaskConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskConfig proto.InternalMessageInfo + +func (m *TaskConfig) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *TaskConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TaskConfig) GetMsgpackDriverConfig() []byte { + if m != nil { + return m.MsgpackDriverConfig + } + return nil +} + +func (m *TaskConfig) GetEnv() map[string]string { + if m != nil { + return m.Env + } + return nil +} + +func (m *TaskConfig) GetDeviceEnv() map[string]string { + if m != nil { + return m.DeviceEnv + } + return nil +} + +func (m *TaskConfig) GetResources() *Resources { + if m != nil { + return 
m.Resources + } + return nil +} + +func (m *TaskConfig) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *TaskConfig) GetDevices() []*Device { + if m != nil { + return m.Devices + } + return nil +} + +func (m *TaskConfig) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *TaskConfig) GetAllocDir() string { + if m != nil { + return m.AllocDir + } + return "" +} + +func (m *TaskConfig) GetStdoutPath() string { + if m != nil { + return m.StdoutPath + } + return "" +} + +func (m *TaskConfig) GetStderrPath() string { + if m != nil { + return m.StderrPath + } + return "" +} + +func (m *TaskConfig) GetTaskGroupName() string { + if m != nil { + return m.TaskGroupName + } + return "" +} + +func (m *TaskConfig) GetJobName() string { + if m != nil { + return m.JobName + } + return "" +} + +func (m *TaskConfig) GetAllocId() string { + if m != nil { + return m.AllocId + } + return "" +} + +type Resources struct { + // AllocatedResources are the resources set for the task + AllocatedResources *AllocatedTaskResources `protobuf:"bytes,1,opt,name=allocated_resources,json=allocatedResources,proto3" json:"allocated_resources,omitempty"` + // LinuxResources are the computed values to set for specific Linux features + LinuxResources *LinuxResources `protobuf:"bytes,2,opt,name=linux_resources,json=linuxResources,proto3" json:"linux_resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{27} +} +func (m *Resources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resources.Unmarshal(m, b) +} +func (m *Resources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resources.Marshal(b, m, deterministic) +} +func (dst *Resources) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resources.Merge(dst, src) +} +func (m *Resources) XXX_Size() int { + return xxx_messageInfo_Resources.Size(m) +} +func (m *Resources) XXX_DiscardUnknown() { + xxx_messageInfo_Resources.DiscardUnknown(m) +} + +var xxx_messageInfo_Resources proto.InternalMessageInfo + +func (m *Resources) GetAllocatedResources() *AllocatedTaskResources { + if m != nil { + return m.AllocatedResources + } + return nil +} + +func (m *Resources) GetLinuxResources() *LinuxResources { + if m != nil { + return m.LinuxResources + } + return nil +} + +type AllocatedTaskResources struct { + Cpu *AllocatedCpuResources `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *AllocatedMemoryResources `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` + Networks []*NetworkResource `protobuf:"bytes,5,rep,name=networks,proto3" json:"networks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatedTaskResources) Reset() { *m = AllocatedTaskResources{} } +func (m *AllocatedTaskResources) String() string { return proto.CompactTextString(m) } +func (*AllocatedTaskResources) ProtoMessage() {} +func (*AllocatedTaskResources) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{28} +} +func (m *AllocatedTaskResources) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_AllocatedTaskResources.Unmarshal(m, b) +} +func (m *AllocatedTaskResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocatedTaskResources.Marshal(b, m, deterministic) +} +func (dst *AllocatedTaskResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedTaskResources.Merge(dst, src) +} +func (m *AllocatedTaskResources) XXX_Size() int { + return xxx_messageInfo_AllocatedTaskResources.Size(m) +} +func (m *AllocatedTaskResources) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedTaskResources.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedTaskResources proto.InternalMessageInfo + +func (m *AllocatedTaskResources) GetCpu() *AllocatedCpuResources { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *AllocatedTaskResources) GetMemory() *AllocatedMemoryResources { + if m != nil { + return m.Memory + } + return nil +} + +func (m *AllocatedTaskResources) GetNetworks() []*NetworkResource { + if m != nil { + return m.Networks + } + return nil +} + +type AllocatedCpuResources struct { + CpuShares int64 `protobuf:"varint,1,opt,name=cpu_shares,json=cpuShares,proto3" json:"cpu_shares,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatedCpuResources) Reset() { *m = AllocatedCpuResources{} } +func (m *AllocatedCpuResources) String() string { return proto.CompactTextString(m) } +func (*AllocatedCpuResources) ProtoMessage() {} +func (*AllocatedCpuResources) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{29} +} +func (m *AllocatedCpuResources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocatedCpuResources.Unmarshal(m, b) +} +func (m *AllocatedCpuResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocatedCpuResources.Marshal(b, m, deterministic) +} +func (dst *AllocatedCpuResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedCpuResources.Merge(dst, src) +} +func (m *AllocatedCpuResources) XXX_Size() int { + return xxx_messageInfo_AllocatedCpuResources.Size(m) +} +func (m *AllocatedCpuResources) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedCpuResources.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedCpuResources proto.InternalMessageInfo + +func (m *AllocatedCpuResources) GetCpuShares() int64 { + if m != nil { + return m.CpuShares + } + return 0 +} + +type AllocatedMemoryResources struct { + MemoryMb int64 `protobuf:"varint,2,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocatedMemoryResources) Reset() { *m = AllocatedMemoryResources{} } +func (m *AllocatedMemoryResources) String() string { return proto.CompactTextString(m) } +func (*AllocatedMemoryResources) ProtoMessage() {} +func (*AllocatedMemoryResources) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{30} +} +func (m *AllocatedMemoryResources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocatedMemoryResources.Unmarshal(m, b) +} +func (m *AllocatedMemoryResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocatedMemoryResources.Marshal(b, m, deterministic) +} +func (dst *AllocatedMemoryResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedMemoryResources.Merge(dst, src) +} +func (m 
*AllocatedMemoryResources) XXX_Size() int { + return xxx_messageInfo_AllocatedMemoryResources.Size(m) +} +func (m *AllocatedMemoryResources) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedMemoryResources.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedMemoryResources proto.InternalMessageInfo + +func (m *AllocatedMemoryResources) GetMemoryMb() int64 { + if m != nil { + return m.MemoryMb + } + return 0 +} + +type NetworkResource struct { + Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` + Cidr string `protobuf:"bytes,2,opt,name=cidr,proto3" json:"cidr,omitempty"` + Ip string `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"` + Mbits int32 `protobuf:"varint,4,opt,name=mbits,proto3" json:"mbits,omitempty"` + ReservedPorts []*NetworkPort `protobuf:"bytes,5,rep,name=reserved_ports,json=reservedPorts,proto3" json:"reserved_ports,omitempty"` + DynamicPorts []*NetworkPort `protobuf:"bytes,6,rep,name=dynamic_ports,json=dynamicPorts,proto3" json:"dynamic_ports,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkResource) Reset() { *m = NetworkResource{} } +func (m *NetworkResource) String() string { return proto.CompactTextString(m) } +func (*NetworkResource) ProtoMessage() {} +func (*NetworkResource) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{31} +} +func (m *NetworkResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkResource.Unmarshal(m, b) +} +func (m *NetworkResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkResource.Marshal(b, m, deterministic) +} +func (dst *NetworkResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkResource.Merge(dst, src) +} +func (m *NetworkResource) XXX_Size() int { + return xxx_messageInfo_NetworkResource.Size(m) +} +func (m *NetworkResource) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkResource.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkResource proto.InternalMessageInfo + +func (m *NetworkResource) GetDevice() string { + if m != nil { + return m.Device + } + return "" +} + +func (m *NetworkResource) GetCidr() string { + if m != nil { + return m.Cidr + } + return "" +} + +func (m *NetworkResource) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +func (m *NetworkResource) GetMbits() int32 { + if m != nil { + return m.Mbits + } + return 0 +} + +func (m *NetworkResource) GetReservedPorts() []*NetworkPort { + if m != nil { + return m.ReservedPorts + } + return nil +} + +func (m *NetworkResource) GetDynamicPorts() []*NetworkPort { + if m != nil { + return m.DynamicPorts + } + return nil +} + +type NetworkPort struct { + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkPort) Reset() { *m = NetworkPort{} } +func (m *NetworkPort) String() string { return proto.CompactTextString(m) } +func (*NetworkPort) ProtoMessage() {} +func (*NetworkPort) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{32} +} +func (m *NetworkPort) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkPort.Unmarshal(m, b) +} +func (m *NetworkPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_NetworkPort.Marshal(b, m, deterministic) +} +func (dst *NetworkPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPort.Merge(dst, src) +} +func (m *NetworkPort) XXX_Size() int { + return xxx_messageInfo_NetworkPort.Size(m) +} +func (m *NetworkPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPort proto.InternalMessageInfo + +func (m *NetworkPort) GetLabel() string { + if m != nil { + return m.Label + } + return "" +} + +func (m *NetworkPort) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +type LinuxResources struct { + // CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified) + CpuPeriod int64 `protobuf:"varint,1,opt,name=cpu_period,json=cpuPeriod,proto3" json:"cpu_period,omitempty"` + // CPU CFS (Completely Fair Scheduler) quota. Default: 0 (not specified) + CpuQuota int64 `protobuf:"varint,2,opt,name=cpu_quota,json=cpuQuota,proto3" json:"cpu_quota,omitempty"` + // CPU shares (relative weight vs. other containers). Default: 0 (not specified) + CpuShares int64 `protobuf:"varint,3,opt,name=cpu_shares,json=cpuShares,proto3" json:"cpu_shares,omitempty"` + // Memory limit in bytes. Default: 0 (not specified) + MemoryLimitBytes int64 `protobuf:"varint,4,opt,name=memory_limit_bytes,json=memoryLimitBytes,proto3" json:"memory_limit_bytes,omitempty"` + // OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified) + OomScoreAdj int64 `protobuf:"varint,5,opt,name=oom_score_adj,json=oomScoreAdj,proto3" json:"oom_score_adj,omitempty"` + // CpusetCpus constrains the allowed set of logical CPUs. Default: "" (not specified) + CpusetCpus string `protobuf:"bytes,6,opt,name=cpuset_cpus,json=cpusetCpus,proto3" json:"cpuset_cpus,omitempty"` + // CpusetMems constrains the allowed set of memory nodes. 
Default: "" (not specified) + CpusetMems string `protobuf:"bytes,7,opt,name=cpuset_mems,json=cpusetMems,proto3" json:"cpuset_mems,omitempty"` + // PercentTicks is a compatibility option for docker and should not be used + PercentTicks float64 `protobuf:"fixed64,8,opt,name=PercentTicks,proto3" json:"PercentTicks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinuxResources) Reset() { *m = LinuxResources{} } +func (m *LinuxResources) String() string { return proto.CompactTextString(m) } +func (*LinuxResources) ProtoMessage() {} +func (*LinuxResources) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{33} +} +func (m *LinuxResources) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LinuxResources.Unmarshal(m, b) +} +func (m *LinuxResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LinuxResources.Marshal(b, m, deterministic) +} +func (dst *LinuxResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxResources.Merge(dst, src) +} +func (m *LinuxResources) XXX_Size() int { + return xxx_messageInfo_LinuxResources.Size(m) +} +func (m *LinuxResources) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxResources.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxResources proto.InternalMessageInfo + +func (m *LinuxResources) GetCpuPeriod() int64 { + if m != nil { + return m.CpuPeriod + } + return 0 +} + +func (m *LinuxResources) GetCpuQuota() int64 { + if m != nil { + return m.CpuQuota + } + return 0 +} + +func (m *LinuxResources) GetCpuShares() int64 { + if m != nil { + return m.CpuShares + } + return 0 +} + +func (m *LinuxResources) GetMemoryLimitBytes() int64 { + if m != nil { + return m.MemoryLimitBytes + } + return 0 +} + +func (m *LinuxResources) GetOomScoreAdj() int64 { + if m != nil { + return m.OomScoreAdj + } + return 0 +} + +func (m *LinuxResources) GetCpusetCpus() string { + if m != nil { + return m.CpusetCpus + } + return "" +} + +func (m *LinuxResources) GetCpusetMems() string { + if m != nil { + return m.CpusetMems + } + return "" +} + +func (m *LinuxResources) GetPercentTicks() float64 { + if m != nil { + return m.PercentTicks + } + return 0 +} + +type Mount struct { + // TaskPath is the file path within the task directory to mount to + TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` + // HostPath is the file path on the host to mount from + HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // Readonly if set true, mounts the path in readonly mode + Readonly bool `protobuf:"varint,3,opt,name=readonly,proto3" json:"readonly,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (m *Mount) String() string { return proto.CompactTextString(m) } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{34} +} +func (m *Mount) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Mount.Unmarshal(m, b) +} +func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Mount.Marshal(b, m, deterministic) +} +func (dst *Mount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mount.Merge(dst, src) +} +func (m *Mount) XXX_Size() int { + return 
xxx_messageInfo_Mount.Size(m) +} +func (m *Mount) XXX_DiscardUnknown() { + xxx_messageInfo_Mount.DiscardUnknown(m) +} + +var xxx_messageInfo_Mount proto.InternalMessageInfo + +func (m *Mount) GetTaskPath() string { + if m != nil { + return m.TaskPath + } + return "" +} + +func (m *Mount) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Mount) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +type Device struct { + // TaskPath is the file path within the task to mount the device to + TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` + // HostPath is the path on the host to the source device + HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // CgroupPermissions defines the Cgroup permissions of the device. + // One or more of the following options can be set: + // * r - allows the task to read from the specified device. + // * w - allows the task to write to the specified device. + // * m - allows the task to create device files that do not yet exist. + // + // Example: "rw" + CgroupPermissions string `protobuf:"bytes,3,opt,name=cgroup_permissions,json=cgroupPermissions,proto3" json:"cgroup_permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Device) Reset() { *m = Device{} } +func (m *Device) String() string { return proto.CompactTextString(m) } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{35} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Device.Unmarshal(m, b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Device.Marshal(b, m, deterministic) +} +func (dst *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(dst, src) +} +func (m *Device) XXX_Size() int { + return xxx_messageInfo_Device.Size(m) +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *Device) GetTaskPath() string { + if m != nil { + return m.TaskPath + } + return "" +} + +func (m *Device) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Device) GetCgroupPermissions() string { + if m != nil { + return m.CgroupPermissions + } + return "" +} + +// TaskHandle is created when starting a task and is used to recover task +type TaskHandle struct { + // Version is used by the driver to version the DriverState schema. + // Version 0 is reserved by Nomad and should not be used. 
+ Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Config is the TaskConfig for the task + Config *TaskConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + // State is the state of the task's execution + State TaskState `protobuf:"varint,3,opt,name=state,proto3,enum=hashicorp.nomad.plugins.drivers.proto.TaskState" json:"state,omitempty"` + // DriverState is the encoded state for the specific driver + DriverState []byte `protobuf:"bytes,4,opt,name=driver_state,json=driverState,proto3" json:"driver_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskHandle) Reset() { *m = TaskHandle{} } +func (m *TaskHandle) String() string { return proto.CompactTextString(m) } +func (*TaskHandle) ProtoMessage() {} +func (*TaskHandle) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{36} +} +func (m *TaskHandle) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskHandle.Unmarshal(m, b) +} +func (m *TaskHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskHandle.Marshal(b, m, deterministic) +} +func (dst *TaskHandle) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskHandle.Merge(dst, src) +} +func (m *TaskHandle) XXX_Size() int { + return xxx_messageInfo_TaskHandle.Size(m) +} +func (m *TaskHandle) XXX_DiscardUnknown() { + xxx_messageInfo_TaskHandle.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskHandle proto.InternalMessageInfo + +func (m *TaskHandle) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *TaskHandle) GetConfig() *TaskConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *TaskHandle) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_UNKNOWN +} + +func (m *TaskHandle) GetDriverState() []byte { + if m != nil { + return m.DriverState + } + return nil +} + +// NetworkOverride contains network settings which the driver may override +// for the task, such as when the driver is setting up the task's network. +type NetworkOverride struct { + // PortMap can be set to replace ports with driver-specific mappings + PortMap map[string]int32 `protobuf:"bytes,1,rep,name=port_map,json=portMap,proto3" json:"port_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // Addr is the IP address for the task created by the driver + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` + // AutoAdvertise indicates whether the driver thinks services that choose + // to auto_advertise_addresses should use this IP instead of the host's. 
+ AutoAdvertise bool `protobuf:"varint,3,opt,name=auto_advertise,json=autoAdvertise,proto3" json:"auto_advertise,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkOverride) Reset() { *m = NetworkOverride{} } +func (m *NetworkOverride) String() string { return proto.CompactTextString(m) } +func (*NetworkOverride) ProtoMessage() {} +func (*NetworkOverride) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{37} +} +func (m *NetworkOverride) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkOverride.Unmarshal(m, b) +} +func (m *NetworkOverride) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkOverride.Marshal(b, m, deterministic) +} +func (dst *NetworkOverride) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkOverride.Merge(dst, src) +} +func (m *NetworkOverride) XXX_Size() int { + return xxx_messageInfo_NetworkOverride.Size(m) +} +func (m *NetworkOverride) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkOverride.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkOverride proto.InternalMessageInfo + +func (m *NetworkOverride) GetPortMap() map[string]int32 { + if m != nil { + return m.PortMap + } + return nil +} + +func (m *NetworkOverride) GetAddr() string { + if m != nil { + return m.Addr + } + return "" +} + +func (m *NetworkOverride) GetAutoAdvertise() bool { + if m != nil { + return m.AutoAdvertise + } + return false +} + +// ExitResult contains information about the exit status of a task +type ExitResult struct { + // ExitCode returned from the task on exit + ExitCode int32 `protobuf:"varint,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + // Signal is set if a signal was sent to the task + Signal int32 `protobuf:"varint,2,opt,name=signal,proto3" json:"signal,omitempty"` + // OomKilled is true if the task exited as a result of the OOM Killer + OomKilled bool `protobuf:"varint,3,opt,name=oom_killed,json=oomKilled,proto3" json:"oom_killed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExitResult) Reset() { *m = ExitResult{} } +func (m *ExitResult) String() string { return proto.CompactTextString(m) } +func (*ExitResult) ProtoMessage() {} +func (*ExitResult) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{38} +} +func (m *ExitResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExitResult.Unmarshal(m, b) +} +func (m *ExitResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExitResult.Marshal(b, m, deterministic) +} +func (dst *ExitResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExitResult.Merge(dst, src) +} +func (m *ExitResult) XXX_Size() int { + return xxx_messageInfo_ExitResult.Size(m) +} +func (m *ExitResult) XXX_DiscardUnknown() { + xxx_messageInfo_ExitResult.DiscardUnknown(m) +} + +var xxx_messageInfo_ExitResult proto.InternalMessageInfo + +func (m *ExitResult) GetExitCode() int32 { + if m != nil { + return m.ExitCode + } + return 0 +} + +func (m *ExitResult) GetSignal() int32 { + if m != nil { + return m.Signal + } + return 0 +} + +func (m *ExitResult) GetOomKilled() bool { + if m != nil { + return m.OomKilled + } + return false +} + +// TaskStatus includes information of a specific task +type TaskStatus struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" 
json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // State is the state of the task's execution + State TaskState `protobuf:"varint,3,opt,name=state,proto3,enum=hashicorp.nomad.plugins.drivers.proto.TaskState" json:"state,omitempty"` + // StartedAt is the timestamp when the task was started + StartedAt *timestamp.Timestamp `protobuf:"bytes,4,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + // CompletedAt is the timestamp when the task exited. + // If the task is still running, CompletedAt will not be set + CompletedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` + // Result is set when CompletedAt is set. + Result *ExitResult `protobuf:"bytes,6,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskStatus) Reset() { *m = TaskStatus{} } +func (m *TaskStatus) String() string { return proto.CompactTextString(m) } +func (*TaskStatus) ProtoMessage() {} +func (*TaskStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{39} +} +func (m *TaskStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskStatus.Unmarshal(m, b) +} +func (m *TaskStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskStatus.Marshal(b, m, deterministic) +} +func (dst *TaskStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStatus.Merge(dst, src) +} +func (m *TaskStatus) XXX_Size() int { + return xxx_messageInfo_TaskStatus.Size(m) +} +func (m *TaskStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStatus proto.InternalMessageInfo + +func (m *TaskStatus) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *TaskStatus) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TaskStatus) GetState() TaskState { + if m != nil { + return m.State + } + return TaskState_UNKNOWN +} + +func (m *TaskStatus) GetStartedAt() *timestamp.Timestamp { + if m != nil { + return m.StartedAt + } + return nil +} + +func (m *TaskStatus) GetCompletedAt() *timestamp.Timestamp { + if m != nil { + return m.CompletedAt + } + return nil +} + +func (m *TaskStatus) GetResult() *ExitResult { + if m != nil { + return m.Result + } + return nil +} + +type TaskDriverStatus struct { + // Attributes is a set of string/string key value pairs specific to the + // implementing driver + Attributes map[string]string `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskDriverStatus) Reset() { *m = TaskDriverStatus{} } +func (m *TaskDriverStatus) String() string { return proto.CompactTextString(m) } +func (*TaskDriverStatus) ProtoMessage() {} +func (*TaskDriverStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{40} +} +func (m *TaskDriverStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskDriverStatus.Unmarshal(m, b) +} +func (m *TaskDriverStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskDriverStatus.Marshal(b, m, deterministic) +} +func (dst 
*TaskDriverStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskDriverStatus.Merge(dst, src) +} +func (m *TaskDriverStatus) XXX_Size() int { + return xxx_messageInfo_TaskDriverStatus.Size(m) +} +func (m *TaskDriverStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TaskDriverStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskDriverStatus proto.InternalMessageInfo + +func (m *TaskDriverStatus) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +type TaskStats struct { + // Id of the task + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Timestamp for which the stats were collected + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // AggResourceUsage is the aggreate usage of all processes + AggResourceUsage *TaskResourceUsage `protobuf:"bytes,3,opt,name=agg_resource_usage,json=aggResourceUsage,proto3" json:"agg_resource_usage,omitempty"` + // ResourceUsageByPid breaks the usage stats by process + ResourceUsageByPid map[string]*TaskResourceUsage `protobuf:"bytes,4,rep,name=resource_usage_by_pid,json=resourceUsageByPid,proto3" json:"resource_usage_by_pid,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskStats) Reset() { *m = TaskStats{} } +func (m *TaskStats) String() string { return proto.CompactTextString(m) } +func (*TaskStats) ProtoMessage() {} +func (*TaskStats) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{41} +} +func (m *TaskStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TaskStats.Unmarshal(m, b) +} +func (m *TaskStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskStats.Marshal(b, m, deterministic) +} +func (dst *TaskStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStats.Merge(dst, src) +} +func (m *TaskStats) XXX_Size() int { + return xxx_messageInfo_TaskStats.Size(m) +} +func (m *TaskStats) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStats.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStats proto.InternalMessageInfo + +func (m *TaskStats) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *TaskStats) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *TaskStats) GetAggResourceUsage() *TaskResourceUsage { + if m != nil { + return m.AggResourceUsage + } + return nil +} + +func (m *TaskStats) GetResourceUsageByPid() map[string]*TaskResourceUsage { + if m != nil { + return m.ResourceUsageByPid + } + return nil +} + +type TaskResourceUsage struct { + // CPU usage stats + Cpu *CPUUsage `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` + // Memory usage stats + Memory *MemoryUsage `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TaskResourceUsage) Reset() { *m = TaskResourceUsage{} } +func (m *TaskResourceUsage) String() string { return proto.CompactTextString(m) } +func (*TaskResourceUsage) ProtoMessage() {} +func (*TaskResourceUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{42} +} +func (m *TaskResourceUsage) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_TaskResourceUsage.Unmarshal(m, b) +} +func (m *TaskResourceUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TaskResourceUsage.Marshal(b, m, deterministic) +} +func (dst *TaskResourceUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskResourceUsage.Merge(dst, src) +} +func (m *TaskResourceUsage) XXX_Size() int { + return xxx_messageInfo_TaskResourceUsage.Size(m) +} +func (m *TaskResourceUsage) XXX_DiscardUnknown() { + xxx_messageInfo_TaskResourceUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskResourceUsage proto.InternalMessageInfo + +func (m *TaskResourceUsage) GetCpu() *CPUUsage { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *TaskResourceUsage) GetMemory() *MemoryUsage { + if m != nil { + return m.Memory + } + return nil +} + +type CPUUsage struct { + SystemMode float64 `protobuf:"fixed64,1,opt,name=system_mode,json=systemMode,proto3" json:"system_mode,omitempty"` + UserMode float64 `protobuf:"fixed64,2,opt,name=user_mode,json=userMode,proto3" json:"user_mode,omitempty"` + TotalTicks float64 `protobuf:"fixed64,3,opt,name=total_ticks,json=totalTicks,proto3" json:"total_ticks,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,4,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,5,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` + Percent float64 `protobuf:"fixed64,6,opt,name=percent,proto3" json:"percent,omitempty"` + // MeasuredFields indicates which fields were actually sampled + MeasuredFields []CPUUsage_Fields `protobuf:"varint,7,rep,packed,name=measured_fields,json=measuredFields,proto3,enum=hashicorp.nomad.plugins.drivers.proto.CPUUsage_Fields" json:"measured_fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CPUUsage) Reset() { *m = CPUUsage{} } +func (m *CPUUsage) String() string { return proto.CompactTextString(m) } +func (*CPUUsage) ProtoMessage() {} +func (*CPUUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{43} +} +func (m *CPUUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CPUUsage.Unmarshal(m, b) +} +func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) +} +func (dst *CPUUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPUUsage.Merge(dst, src) +} +func (m *CPUUsage) XXX_Size() int { + return xxx_messageInfo_CPUUsage.Size(m) +} +func (m *CPUUsage) XXX_DiscardUnknown() { + xxx_messageInfo_CPUUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_CPUUsage proto.InternalMessageInfo + +func (m *CPUUsage) GetSystemMode() float64 { + if m != nil { + return m.SystemMode + } + return 0 +} + +func (m *CPUUsage) GetUserMode() float64 { + if m != nil { + return m.UserMode + } + return 0 +} + +func (m *CPUUsage) GetTotalTicks() float64 { + if m != nil { + return m.TotalTicks + } + return 0 +} + +func (m *CPUUsage) GetThrottledPeriods() uint64 { + if m != nil { + return m.ThrottledPeriods + } + return 0 +} + +func (m *CPUUsage) GetThrottledTime() uint64 { + if m != nil { + return m.ThrottledTime + } + return 0 +} + +func (m *CPUUsage) GetPercent() float64 { + if m != nil { + return m.Percent + } + return 0 +} + +func (m *CPUUsage) GetMeasuredFields() []CPUUsage_Fields { + if m != nil { + return m.MeasuredFields + } + return nil +} + +type 
MemoryUsage struct { + Rss uint64 `protobuf:"varint,1,opt,name=rss,proto3" json:"rss,omitempty"` + Cache uint64 `protobuf:"varint,2,opt,name=cache,proto3" json:"cache,omitempty"` + MaxUsage uint64 `protobuf:"varint,3,opt,name=max_usage,json=maxUsage,proto3" json:"max_usage,omitempty"` + KernelUsage uint64 `protobuf:"varint,4,opt,name=kernel_usage,json=kernelUsage,proto3" json:"kernel_usage,omitempty"` + KernelMaxUsage uint64 `protobuf:"varint,5,opt,name=kernel_max_usage,json=kernelMaxUsage,proto3" json:"kernel_max_usage,omitempty"` + Usage uint64 `protobuf:"varint,7,opt,name=usage,proto3" json:"usage,omitempty"` + Swap uint64 `protobuf:"varint,8,opt,name=swap,proto3" json:"swap,omitempty"` + // MeasuredFields indicates which fields were actually sampled + MeasuredFields []MemoryUsage_Fields `protobuf:"varint,6,rep,packed,name=measured_fields,json=measuredFields,proto3,enum=hashicorp.nomad.plugins.drivers.proto.MemoryUsage_Fields" json:"measured_fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryUsage) Reset() { *m = MemoryUsage{} } +func (m *MemoryUsage) String() string { return proto.CompactTextString(m) } +func (*MemoryUsage) ProtoMessage() {} +func (*MemoryUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{44} +} +func (m *MemoryUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemoryUsage.Unmarshal(m, b) +} +func (m *MemoryUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemoryUsage.Marshal(b, m, deterministic) +} +func (dst *MemoryUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryUsage.Merge(dst, src) +} +func (m *MemoryUsage) XXX_Size() int { + return xxx_messageInfo_MemoryUsage.Size(m) +} +func (m *MemoryUsage) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryUsage proto.InternalMessageInfo + +func (m *MemoryUsage) GetRss() uint64 { + if m != nil { + return m.Rss + } + return 0 +} + +func (m *MemoryUsage) GetCache() uint64 { + if m != nil { + return m.Cache + } + return 0 +} + +func (m *MemoryUsage) GetMaxUsage() uint64 { + if m != nil { + return m.MaxUsage + } + return 0 +} + +func (m *MemoryUsage) GetKernelUsage() uint64 { + if m != nil { + return m.KernelUsage + } + return 0 +} + +func (m *MemoryUsage) GetKernelMaxUsage() uint64 { + if m != nil { + return m.KernelMaxUsage + } + return 0 +} + +func (m *MemoryUsage) GetUsage() uint64 { + if m != nil { + return m.Usage + } + return 0 +} + +func (m *MemoryUsage) GetSwap() uint64 { + if m != nil { + return m.Swap + } + return 0 +} + +func (m *MemoryUsage) GetMeasuredFields() []MemoryUsage_Fields { + if m != nil { + return m.MeasuredFields + } + return nil +} + +type DriverTaskEvent struct { + // TaskId is the id of the task for the event + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // AllocId of the task for the event + AllocId string `protobuf:"bytes,2,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"` + // TaskName is the name of the task for the event + TaskName string `protobuf:"bytes,3,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty"` + // Timestamp when the event occurred + Timestamp *timestamp.Timestamp `protobuf:"bytes,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Message is the body of the event + Message string `protobuf:"bytes,5,opt,name=message,proto3" 
json:"message,omitempty"` + // Annotations allows for additional key/value data to be sent along with the event + Annotations map[string]string `protobuf:"bytes,6,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DriverTaskEvent) Reset() { *m = DriverTaskEvent{} } +func (m *DriverTaskEvent) String() string { return proto.CompactTextString(m) } +func (*DriverTaskEvent) ProtoMessage() {} +func (*DriverTaskEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_driver_8c4ef0bcc46739f6, []int{45} +} +func (m *DriverTaskEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DriverTaskEvent.Unmarshal(m, b) +} +func (m *DriverTaskEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DriverTaskEvent.Marshal(b, m, deterministic) +} +func (dst *DriverTaskEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_DriverTaskEvent.Merge(dst, src) +} +func (m *DriverTaskEvent) XXX_Size() int { + return xxx_messageInfo_DriverTaskEvent.Size(m) +} +func (m *DriverTaskEvent) XXX_DiscardUnknown() { + xxx_messageInfo_DriverTaskEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_DriverTaskEvent proto.InternalMessageInfo + +func (m *DriverTaskEvent) GetTaskId() string { + if m != nil { + return m.TaskId + } + return "" +} + +func (m *DriverTaskEvent) GetAllocId() string { + if m != nil { + return m.AllocId + } + return "" +} + +func (m *DriverTaskEvent) GetTaskName() string { + if m != nil { + return m.TaskName + } + return "" +} + +func (m *DriverTaskEvent) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *DriverTaskEvent) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *DriverTaskEvent) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func init() { + proto.RegisterType((*TaskConfigSchemaRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfigSchemaRequest") + proto.RegisterType((*TaskConfigSchemaResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfigSchemaResponse") + proto.RegisterType((*CapabilitiesRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.CapabilitiesRequest") + proto.RegisterType((*CapabilitiesResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.CapabilitiesResponse") + proto.RegisterType((*FingerprintRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.FingerprintRequest") + proto.RegisterType((*FingerprintResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.FingerprintResponse") + proto.RegisterMapType((map[string]*proto1.Attribute)(nil), "hashicorp.nomad.plugins.drivers.proto.FingerprintResponse.AttributesEntry") + proto.RegisterType((*RecoverTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.RecoverTaskRequest") + proto.RegisterType((*RecoverTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.RecoverTaskResponse") + proto.RegisterType((*StartTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.StartTaskRequest") + proto.RegisterType((*StartTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.StartTaskResponse") + proto.RegisterType((*WaitTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.WaitTaskRequest") + proto.RegisterType((*WaitTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.WaitTaskResponse") + 
proto.RegisterType((*StopTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.StopTaskRequest") + proto.RegisterType((*StopTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.StopTaskResponse") + proto.RegisterType((*DestroyTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.DestroyTaskRequest") + proto.RegisterType((*DestroyTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.DestroyTaskResponse") + proto.RegisterType((*InspectTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.InspectTaskRequest") + proto.RegisterType((*InspectTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.InspectTaskResponse") + proto.RegisterType((*TaskStatsRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStatsRequest") + proto.RegisterType((*TaskStatsResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStatsResponse") + proto.RegisterType((*TaskEventsRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskEventsRequest") + proto.RegisterType((*SignalTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.SignalTaskRequest") + proto.RegisterType((*SignalTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.SignalTaskResponse") + proto.RegisterType((*ExecTaskRequest)(nil), "hashicorp.nomad.plugins.drivers.proto.ExecTaskRequest") + proto.RegisterType((*ExecTaskResponse)(nil), "hashicorp.nomad.plugins.drivers.proto.ExecTaskResponse") + proto.RegisterType((*DriverCapabilities)(nil), "hashicorp.nomad.plugins.drivers.proto.DriverCapabilities") + proto.RegisterType((*TaskConfig)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfig") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfig.DeviceEnvEntry") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskConfig.EnvEntry") + proto.RegisterType((*Resources)(nil), "hashicorp.nomad.plugins.drivers.proto.Resources") + proto.RegisterType((*AllocatedTaskResources)(nil), "hashicorp.nomad.plugins.drivers.proto.AllocatedTaskResources") + proto.RegisterType((*AllocatedCpuResources)(nil), "hashicorp.nomad.plugins.drivers.proto.AllocatedCpuResources") + proto.RegisterType((*AllocatedMemoryResources)(nil), "hashicorp.nomad.plugins.drivers.proto.AllocatedMemoryResources") + proto.RegisterType((*NetworkResource)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkResource") + proto.RegisterType((*NetworkPort)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkPort") + proto.RegisterType((*LinuxResources)(nil), "hashicorp.nomad.plugins.drivers.proto.LinuxResources") + proto.RegisterType((*Mount)(nil), "hashicorp.nomad.plugins.drivers.proto.Mount") + proto.RegisterType((*Device)(nil), "hashicorp.nomad.plugins.drivers.proto.Device") + proto.RegisterType((*TaskHandle)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskHandle") + proto.RegisterType((*NetworkOverride)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkOverride") + proto.RegisterMapType((map[string]int32)(nil), "hashicorp.nomad.plugins.drivers.proto.NetworkOverride.PortMapEntry") + proto.RegisterType((*ExitResult)(nil), "hashicorp.nomad.plugins.drivers.proto.ExitResult") + proto.RegisterType((*TaskStatus)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStatus") + proto.RegisterType((*TaskDriverStatus)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskDriverStatus") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskDriverStatus.AttributesEntry") + proto.RegisterType((*TaskStats)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStats") + 
proto.RegisterMapType((map[string]*TaskResourceUsage)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskStats.ResourceUsageByPidEntry") + proto.RegisterType((*TaskResourceUsage)(nil), "hashicorp.nomad.plugins.drivers.proto.TaskResourceUsage") + proto.RegisterType((*CPUUsage)(nil), "hashicorp.nomad.plugins.drivers.proto.CPUUsage") + proto.RegisterType((*MemoryUsage)(nil), "hashicorp.nomad.plugins.drivers.proto.MemoryUsage") + proto.RegisterType((*DriverTaskEvent)(nil), "hashicorp.nomad.plugins.drivers.proto.DriverTaskEvent") + proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.drivers.proto.DriverTaskEvent.AnnotationsEntry") + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.TaskState", TaskState_name, TaskState_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.FingerprintResponse_HealthState", FingerprintResponse_HealthState_name, FingerprintResponse_HealthState_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.StartTaskResponse_Result", StartTaskResponse_Result_name, StartTaskResponse_Result_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.DriverCapabilities_FSIsolation", DriverCapabilities_FSIsolation_name, DriverCapabilities_FSIsolation_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.CPUUsage_Fields", CPUUsage_Fields_name, CPUUsage_Fields_value) + proto.RegisterEnum("hashicorp.nomad.plugins.drivers.proto.MemoryUsage_Fields", MemoryUsage_Fields_name, MemoryUsage_Fields_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DriverClient is the client API for Driver service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DriverClient interface { + // TaskConfigSchema returns the schema for parsing the driver + // configuration of a task. + TaskConfigSchema(ctx context.Context, in *TaskConfigSchemaRequest, opts ...grpc.CallOption) (*TaskConfigSchemaResponse, error) + // Capabilities returns a set of features which the driver implements. Some + // RPCs are not possible to implement on some runtimes, this allows the + // driver to indicate if it doesn't support these RPCs and features. + Capabilities(ctx context.Context, in *CapabilitiesRequest, opts ...grpc.CallOption) (*CapabilitiesResponse, error) + // Fingerprint starts a stream which emits information about the driver + // including whether the driver healthy and able to function in the + // existing environment. + // + // The driver should immediately stream a FingerprintResponse when the RPC + // is initially called, then send any additional responses if there is a + // change in the driver's state. + Fingerprint(ctx context.Context, in *FingerprintRequest, opts ...grpc.CallOption) (Driver_FingerprintClient, error) + // RecoverTask is used when a task has been started but the driver may not + // know about it. Such is the case if the driver restarts or is upgraded. 
+ RecoverTask(ctx context.Context, in *RecoverTaskRequest, opts ...grpc.CallOption) (*RecoverTaskResponse, error) + // StartTask starts and tracks the task on the implemented runtime + StartTask(ctx context.Context, in *StartTaskRequest, opts ...grpc.CallOption) (*StartTaskResponse, error) + // WaitTask blocks until the given task exits, returning the result of the + // task. It may be called after the task has exited, but before the task is + // destroyed. + WaitTask(ctx context.Context, in *WaitTaskRequest, opts ...grpc.CallOption) (*WaitTaskResponse, error) + // StopTask stops a given task by sending the desired signal to the process. + // If the task does not exit on its own within the given timeout, it will be + // forcefully killed. + StopTask(ctx context.Context, in *StopTaskRequest, opts ...grpc.CallOption) (*StopTaskResponse, error) + // DestroyTask removes the task from the driver's internal state and cleans + // up any additional resources created by the driver. It cannot be called + // on a running task, unless force is set to true. + DestroyTask(ctx context.Context, in *DestroyTaskRequest, opts ...grpc.CallOption) (*DestroyTaskResponse, error) + // InspectTask returns detailed information for the given task + InspectTask(ctx context.Context, in *InspectTaskRequest, opts ...grpc.CallOption) (*InspectTaskResponse, error) + // TaskStats collects and returns runtime metrics for the given task + TaskStats(ctx context.Context, in *TaskStatsRequest, opts ...grpc.CallOption) (Driver_TaskStatsClient, error) + // TaskEvents starts a streaming RPC where all task events emitted by the + // driver are streamed to the caller. + TaskEvents(ctx context.Context, in *TaskEventsRequest, opts ...grpc.CallOption) (Driver_TaskEventsClient, error) + // SignalTask sends a signal to the task + SignalTask(ctx context.Context, in *SignalTaskRequest, opts ...grpc.CallOption) (*SignalTaskResponse, error) + // ExecTask executes a command inside the tasks execution context + ExecTask(ctx context.Context, in *ExecTaskRequest, opts ...grpc.CallOption) (*ExecTaskResponse, error) +} + +type driverClient struct { + cc *grpc.ClientConn +} + +func NewDriverClient(cc *grpc.ClientConn) DriverClient { + return &driverClient{cc} +} + +func (c *driverClient) TaskConfigSchema(ctx context.Context, in *TaskConfigSchemaRequest, opts ...grpc.CallOption) (*TaskConfigSchemaResponse, error) { + out := new(TaskConfigSchemaResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/TaskConfigSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) Capabilities(ctx context.Context, in *CapabilitiesRequest, opts ...grpc.CallOption) (*CapabilitiesResponse, error) { + out := new(CapabilitiesResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/Capabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) Fingerprint(ctx context.Context, in *FingerprintRequest, opts ...grpc.CallOption) (Driver_FingerprintClient, error) { + stream, err := c.cc.NewStream(ctx, &_Driver_serviceDesc.Streams[0], "/hashicorp.nomad.plugins.drivers.proto.Driver/Fingerprint", opts...) 
+ if err != nil { + return nil, err + } + x := &driverFingerprintClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Driver_FingerprintClient interface { + Recv() (*FingerprintResponse, error) + grpc.ClientStream +} + +type driverFingerprintClient struct { + grpc.ClientStream +} + +func (x *driverFingerprintClient) Recv() (*FingerprintResponse, error) { + m := new(FingerprintResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *driverClient) RecoverTask(ctx context.Context, in *RecoverTaskRequest, opts ...grpc.CallOption) (*RecoverTaskResponse, error) { + out := new(RecoverTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/RecoverTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) StartTask(ctx context.Context, in *StartTaskRequest, opts ...grpc.CallOption) (*StartTaskResponse, error) { + out := new(StartTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/StartTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) WaitTask(ctx context.Context, in *WaitTaskRequest, opts ...grpc.CallOption) (*WaitTaskResponse, error) { + out := new(WaitTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/WaitTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) StopTask(ctx context.Context, in *StopTaskRequest, opts ...grpc.CallOption) (*StopTaskResponse, error) { + out := new(StopTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/StopTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) DestroyTask(ctx context.Context, in *DestroyTaskRequest, opts ...grpc.CallOption) (*DestroyTaskResponse, error) { + out := new(DestroyTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/DestroyTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) InspectTask(ctx context.Context, in *InspectTaskRequest, opts ...grpc.CallOption) (*InspectTaskResponse, error) { + out := new(InspectTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/InspectTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) TaskStats(ctx context.Context, in *TaskStatsRequest, opts ...grpc.CallOption) (Driver_TaskStatsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Driver_serviceDesc.Streams[1], "/hashicorp.nomad.plugins.drivers.proto.Driver/TaskStats", opts...) 
+ if err != nil { + return nil, err + } + x := &driverTaskStatsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Driver_TaskStatsClient interface { + Recv() (*TaskStatsResponse, error) + grpc.ClientStream +} + +type driverTaskStatsClient struct { + grpc.ClientStream +} + +func (x *driverTaskStatsClient) Recv() (*TaskStatsResponse, error) { + m := new(TaskStatsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *driverClient) TaskEvents(ctx context.Context, in *TaskEventsRequest, opts ...grpc.CallOption) (Driver_TaskEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Driver_serviceDesc.Streams[2], "/hashicorp.nomad.plugins.drivers.proto.Driver/TaskEvents", opts...) + if err != nil { + return nil, err + } + x := &driverTaskEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Driver_TaskEventsClient interface { + Recv() (*DriverTaskEvent, error) + grpc.ClientStream +} + +type driverTaskEventsClient struct { + grpc.ClientStream +} + +func (x *driverTaskEventsClient) Recv() (*DriverTaskEvent, error) { + m := new(DriverTaskEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *driverClient) SignalTask(ctx context.Context, in *SignalTaskRequest, opts ...grpc.CallOption) (*SignalTaskResponse, error) { + out := new(SignalTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/SignalTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *driverClient) ExecTask(ctx context.Context, in *ExecTaskRequest, opts ...grpc.CallOption) (*ExecTaskResponse, error) { + out := new(ExecTaskResponse) + err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.drivers.proto.Driver/ExecTask", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DriverServer is the server API for Driver service. +type DriverServer interface { + // TaskConfigSchema returns the schema for parsing the driver + // configuration of a task. + TaskConfigSchema(context.Context, *TaskConfigSchemaRequest) (*TaskConfigSchemaResponse, error) + // Capabilities returns a set of features which the driver implements. Some + // RPCs are not possible to implement on some runtimes, this allows the + // driver to indicate if it doesn't support these RPCs and features. + Capabilities(context.Context, *CapabilitiesRequest) (*CapabilitiesResponse, error) + // Fingerprint starts a stream which emits information about the driver + // including whether the driver healthy and able to function in the + // existing environment. + // + // The driver should immediately stream a FingerprintResponse when the RPC + // is initially called, then send any additional responses if there is a + // change in the driver's state. + Fingerprint(*FingerprintRequest, Driver_FingerprintServer) error + // RecoverTask is used when a task has been started but the driver may not + // know about it. Such is the case if the driver restarts or is upgraded. 
+ RecoverTask(context.Context, *RecoverTaskRequest) (*RecoverTaskResponse, error) + // StartTask starts and tracks the task on the implemented runtime + StartTask(context.Context, *StartTaskRequest) (*StartTaskResponse, error) + // WaitTask blocks until the given task exits, returning the result of the + // task. It may be called after the task has exited, but before the task is + // destroyed. + WaitTask(context.Context, *WaitTaskRequest) (*WaitTaskResponse, error) + // StopTask stops a given task by sending the desired signal to the process. + // If the task does not exit on its own within the given timeout, it will be + // forcefully killed. + StopTask(context.Context, *StopTaskRequest) (*StopTaskResponse, error) + // DestroyTask removes the task from the driver's internal state and cleans + // up any additional resources created by the driver. It cannot be called + // on a running task, unless force is set to true. + DestroyTask(context.Context, *DestroyTaskRequest) (*DestroyTaskResponse, error) + // InspectTask returns detailed information for the given task + InspectTask(context.Context, *InspectTaskRequest) (*InspectTaskResponse, error) + // TaskStats collects and returns runtime metrics for the given task + TaskStats(*TaskStatsRequest, Driver_TaskStatsServer) error + // TaskEvents starts a streaming RPC where all task events emitted by the + // driver are streamed to the caller. + TaskEvents(*TaskEventsRequest, Driver_TaskEventsServer) error + // SignalTask sends a signal to the task + SignalTask(context.Context, *SignalTaskRequest) (*SignalTaskResponse, error) + // ExecTask executes a command inside the tasks execution context + ExecTask(context.Context, *ExecTaskRequest) (*ExecTaskResponse, error) +} + +func RegisterDriverServer(s *grpc.Server, srv DriverServer) { + s.RegisterService(&_Driver_serviceDesc, srv) +} + +func _Driver_TaskConfigSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TaskConfigSchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).TaskConfigSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/TaskConfigSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).TaskConfigSchema(ctx, req.(*TaskConfigSchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_Capabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).Capabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/Capabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).Capabilities(ctx, req.(*CapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_Fingerprint_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(FingerprintRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DriverServer).Fingerprint(m, &driverFingerprintServer{stream}) +} + +type Driver_FingerprintServer interface { + Send(*FingerprintResponse) 
error + grpc.ServerStream +} + +type driverFingerprintServer struct { + grpc.ServerStream +} + +func (x *driverFingerprintServer) Send(m *FingerprintResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Driver_RecoverTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RecoverTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).RecoverTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/RecoverTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).RecoverTask(ctx, req.(*RecoverTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_StartTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).StartTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/StartTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).StartTask(ctx, req.(*StartTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_WaitTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WaitTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).WaitTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/WaitTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).WaitTask(ctx, req.(*WaitTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_StopTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).StopTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/StopTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).StopTask(ctx, req.(*StopTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_DestroyTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DestroyTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).DestroyTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/DestroyTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).DestroyTask(ctx, req.(*DestroyTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_InspectTask_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InspectTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).InspectTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/InspectTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).InspectTask(ctx, req.(*InspectTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_TaskStats_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TaskStatsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DriverServer).TaskStats(m, &driverTaskStatsServer{stream}) +} + +type Driver_TaskStatsServer interface { + Send(*TaskStatsResponse) error + grpc.ServerStream +} + +type driverTaskStatsServer struct { + grpc.ServerStream +} + +func (x *driverTaskStatsServer) Send(m *TaskStatsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Driver_TaskEvents_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TaskEventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DriverServer).TaskEvents(m, &driverTaskEventsServer{stream}) +} + +type Driver_TaskEventsServer interface { + Send(*DriverTaskEvent) error + grpc.ServerStream +} + +type driverTaskEventsServer struct { + grpc.ServerStream +} + +func (x *driverTaskEventsServer) Send(m *DriverTaskEvent) error { + return x.ServerStream.SendMsg(m) +} + +func _Driver_SignalTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignalTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).SignalTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/SignalTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).SignalTask(ctx, req.(*SignalTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Driver_ExecTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).ExecTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hashicorp.nomad.plugins.drivers.proto.Driver/ExecTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).ExecTask(ctx, req.(*ExecTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Driver_serviceDesc = grpc.ServiceDesc{ + ServiceName: "hashicorp.nomad.plugins.drivers.proto.Driver", + HandlerType: (*DriverServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "TaskConfigSchema", + Handler: _Driver_TaskConfigSchema_Handler, + }, + { + MethodName: "Capabilities", + Handler: _Driver_Capabilities_Handler, + }, + { + MethodName: "RecoverTask", + Handler: _Driver_RecoverTask_Handler, + }, + { + MethodName: "StartTask", + Handler: _Driver_StartTask_Handler, + }, + { + MethodName: "WaitTask", + Handler: _Driver_WaitTask_Handler, + }, + { + MethodName: "StopTask", + Handler: 
_Driver_StopTask_Handler, + }, + { + MethodName: "DestroyTask", + Handler: _Driver_DestroyTask_Handler, + }, + { + MethodName: "InspectTask", + Handler: _Driver_InspectTask_Handler, + }, + { + MethodName: "SignalTask", + Handler: _Driver_SignalTask_Handler, + }, + { + MethodName: "ExecTask", + Handler: _Driver_ExecTask_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Fingerprint", + Handler: _Driver_Fingerprint_Handler, + ServerStreams: true, + }, + { + StreamName: "TaskStats", + Handler: _Driver_TaskStats_Handler, + ServerStreams: true, + }, + { + StreamName: "TaskEvents", + Handler: _Driver_TaskEvents_Handler, + ServerStreams: true, + }, + }, + Metadata: "plugins/drivers/proto/driver.proto", +} + +func init() { + proto.RegisterFile("plugins/drivers/proto/driver.proto", fileDescriptor_driver_8c4ef0bcc46739f6) +} + +var fileDescriptor_driver_8c4ef0bcc46739f6 = []byte{ + // 3009 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xdb, 0x6f, 0xe3, 0xc6, + 0xd5, 0xb7, 0xae, 0x96, 0x8e, 0x6c, 0x99, 0x3b, 0xbb, 0x9b, 0x28, 0x0a, 0xbe, 0x2f, 0x1b, 0x02, + 0xf9, 0x60, 0x24, 0x59, 0x39, 0x71, 0xf0, 0xed, 0xad, 0xb9, 0x29, 0x12, 0xd7, 0x76, 0xd6, 0x96, + 0xdd, 0x91, 0x8c, 0xcd, 0xb6, 0x4d, 0x58, 0x9a, 0x9c, 0x95, 0xb8, 0xe6, 0x2d, 0xe4, 0xd0, 0x6b, + 0xa3, 0x28, 0x5a, 0xa4, 0x40, 0xd1, 0x3e, 0x14, 0xe8, 0x4b, 0x90, 0xf7, 0xf6, 0xb1, 0x7f, 0x41, + 0x5b, 0xe4, 0x2f, 0x69, 0x5f, 0x5a, 0xa0, 0x40, 0x5f, 0xfb, 0x1f, 0x14, 0x73, 0x21, 0x45, 0xd9, + 0xde, 0x2c, 0xa5, 0xcd, 0x13, 0x79, 0xce, 0xcc, 0xf9, 0xcd, 0x99, 0x39, 0x67, 0xe6, 0x9c, 0x99, + 0x03, 0x6a, 0xe0, 0xc4, 0x63, 0xdb, 0x8b, 0x36, 0xac, 0xd0, 0x3e, 0x21, 0x61, 0xb4, 0x11, 0x84, + 0x3e, 0xf5, 0x25, 0xd5, 0xe1, 0x04, 0x7a, 0x63, 0x62, 0x44, 0x13, 0xdb, 0xf4, 0xc3, 0xa0, 0xe3, + 0xf9, 0xae, 0x61, 0x75, 0xa4, 0x4c, 0x47, 0xca, 0x88, 0x6e, 0xed, 0xff, 0x1d, 0xfb, 0xfe, 0xd8, + 0x21, 0x02, 0xe1, 0x28, 0x7e, 0xbc, 0x61, 0xc5, 0xa1, 0x41, 0x6d, 0xdf, 0x93, 0xed, 0xaf, 0x9d, + 0x6f, 0xa7, 0xb6, 0x4b, 0x22, 0x6a, 0xb8, 0x81, 0xec, 0xf0, 0xf1, 0xd8, 0xa6, 0x93, 0xf8, 0xa8, + 0x63, 0xfa, 0xee, 0x46, 0x3a, 0xe4, 0x06, 0x1f, 0x72, 0x23, 0x51, 0x33, 0x9a, 0x18, 0x21, 0xb1, + 0x36, 0x26, 0xa6, 0x13, 0x05, 0xc4, 0x64, 0x5f, 0x9d, 0xfd, 0x48, 0x84, 0xad, 0xfc, 0x08, 0x11, + 0x0d, 0x63, 0x93, 0x26, 0xf3, 0x35, 0x28, 0x0d, 0xed, 0xa3, 0x98, 0x12, 0x01, 0xa4, 0xbe, 0x02, + 0x2f, 0x8f, 0x8c, 0xe8, 0xb8, 0xe7, 0x7b, 0x8f, 0xed, 0xf1, 0xd0, 0x9c, 0x10, 0xd7, 0xc0, 0xe4, + 0xcb, 0x98, 0x44, 0x54, 0xfd, 0x09, 0xb4, 0x2e, 0x36, 0x45, 0x81, 0xef, 0x45, 0x04, 0x7d, 0x0c, + 0x65, 0xa6, 0x4d, 0xab, 0x70, 0xa3, 0xb0, 0xde, 0xd8, 0x7c, 0xbb, 0xf3, 0xac, 0x85, 0x13, 0x3a, + 0x74, 0xe4, 0x2c, 0x3a, 0xc3, 0x80, 0x98, 0x98, 0x4b, 0xaa, 0xd7, 0xe1, 0x6a, 0xcf, 0x08, 0x8c, + 0x23, 0xdb, 0xb1, 0xa9, 0x4d, 0xa2, 0x64, 0xd0, 0x18, 0xae, 0xcd, 0xb2, 0xe5, 0x80, 0x9f, 0xc3, + 0x8a, 0x99, 0xe1, 0xcb, 0x81, 0xef, 0x76, 0x72, 0x59, 0xac, 0xd3, 0xe7, 0xd4, 0x0c, 0xf0, 0x0c, + 0x9c, 0x7a, 0x0d, 0xd0, 0x7d, 0xdb, 0x1b, 0x93, 0x30, 0x08, 0x6d, 0x8f, 0x26, 0xca, 0x7c, 0x5b, + 0x82, 0xab, 0x33, 0x6c, 0xa9, 0xcc, 0x13, 0x80, 0x74, 0x1d, 0x99, 0x2a, 0xa5, 0xf5, 0xc6, 0xe6, + 0xa7, 0x39, 0x55, 0xb9, 0x04, 0xaf, 0xd3, 0x4d, 0xc1, 0x34, 0x8f, 0x86, 0x67, 0x38, 0x83, 0x8e, + 0xbe, 0x80, 0xea, 0x84, 0x18, 0x0e, 0x9d, 0xb4, 0x8a, 0x37, 0x0a, 0xeb, 0xcd, 0xcd, 0xfb, 0x2f, + 0x30, 0xce, 0x36, 0x07, 0x1a, 0x52, 0x83, 0x12, 0x2c, 0x51, 0xd1, 0x4d, 0x40, 0xe2, 0x4f, 0xb7, + 0x48, 0x64, 0x86, 0x76, 0xc0, 0x1c, 0xb9, 0x55, 0xba, 
0x51, 0x58, 0xaf, 0xe3, 0x2b, 0xa2, 0xa5, + 0x3f, 0x6d, 0x68, 0x07, 0xb0, 0x76, 0x4e, 0x5b, 0xa4, 0x40, 0xe9, 0x98, 0x9c, 0x71, 0x8b, 0xd4, + 0x31, 0xfb, 0x45, 0x5b, 0x50, 0x39, 0x31, 0x9c, 0x98, 0x70, 0x95, 0x1b, 0x9b, 0xef, 0x3e, 0xcf, + 0x3d, 0xa4, 0x8b, 0x4e, 0xd7, 0x01, 0x0b, 0xf9, 0x7b, 0xc5, 0x3b, 0x05, 0xf5, 0x2e, 0x34, 0x32, + 0x7a, 0xa3, 0x26, 0xc0, 0xe1, 0xa0, 0xaf, 0x8d, 0xb4, 0xde, 0x48, 0xeb, 0x2b, 0x4b, 0x68, 0x15, + 0xea, 0x87, 0x83, 0x6d, 0xad, 0xbb, 0x3b, 0xda, 0x7e, 0xa4, 0x14, 0x50, 0x03, 0x96, 0x13, 0xa2, + 0xa8, 0x9e, 0x02, 0xc2, 0xc4, 0xf4, 0x4f, 0x48, 0xc8, 0x1c, 0x59, 0x5a, 0x15, 0xbd, 0x0c, 0xcb, + 0xd4, 0x88, 0x8e, 0x75, 0xdb, 0x92, 0x3a, 0x57, 0x19, 0xb9, 0x63, 0xa1, 0x1d, 0xa8, 0x4e, 0x0c, + 0xcf, 0x72, 0x9e, 0xaf, 0xf7, 0xec, 0x52, 0x33, 0xf0, 0x6d, 0x2e, 0x88, 0x25, 0x00, 0xf3, 0xee, + 0x99, 0x91, 0x85, 0x01, 0xd4, 0x47, 0xa0, 0x0c, 0xa9, 0x11, 0xd2, 0xac, 0x3a, 0x1a, 0x94, 0xd9, + 0xf8, 0xd2, 0xa3, 0xe7, 0x19, 0x53, 0xec, 0x4c, 0xcc, 0xc5, 0xd5, 0xff, 0x14, 0xe1, 0x4a, 0x06, + 0x5b, 0x7a, 0xea, 0x43, 0xa8, 0x86, 0x24, 0x8a, 0x1d, 0xca, 0xe1, 0x9b, 0x9b, 0x1f, 0xe5, 0x84, + 0xbf, 0x80, 0xd4, 0xc1, 0x1c, 0x06, 0x4b, 0x38, 0xb4, 0x0e, 0x8a, 0x90, 0xd0, 0x49, 0x18, 0xfa, + 0xa1, 0xee, 0x46, 0x63, 0xbe, 0x6a, 0x75, 0xdc, 0x14, 0x7c, 0x8d, 0xb1, 0xf7, 0xa2, 0x71, 0x66, + 0x55, 0x4b, 0x2f, 0xb8, 0xaa, 0xc8, 0x00, 0xc5, 0x23, 0xf4, 0xa9, 0x1f, 0x1e, 0xeb, 0x6c, 0x69, + 0x43, 0xdb, 0x22, 0xad, 0x32, 0x07, 0xbd, 0x95, 0x13, 0x74, 0x20, 0xc4, 0xf7, 0xa5, 0x34, 0x5e, + 0xf3, 0x66, 0x19, 0xea, 0x5b, 0x50, 0x15, 0x33, 0x65, 0x9e, 0x34, 0x3c, 0xec, 0xf5, 0xb4, 0xe1, + 0x50, 0x59, 0x42, 0x75, 0xa8, 0x60, 0x6d, 0x84, 0x99, 0x87, 0xd5, 0xa1, 0x72, 0xbf, 0x3b, 0xea, + 0xee, 0x2a, 0x45, 0xf5, 0x4d, 0x58, 0x7b, 0x68, 0xd8, 0x34, 0x8f, 0x73, 0xa9, 0x3e, 0x28, 0xd3, + 0xbe, 0xd2, 0x3a, 0x3b, 0x33, 0xd6, 0xc9, 0xbf, 0x34, 0xda, 0xa9, 0x4d, 0xcf, 0xd9, 0x43, 0x81, + 0x12, 0x09, 0x43, 0x69, 0x02, 0xf6, 0xab, 0x3e, 0x85, 0xb5, 0x21, 0xf5, 0x83, 0x5c, 0x9e, 0xff, + 0x1e, 0x2c, 0xb3, 0x18, 0xe5, 0xc7, 0x54, 0xba, 0xfe, 0x2b, 0x1d, 0x11, 0xc3, 0x3a, 0x49, 0x0c, + 0xeb, 0xf4, 0x65, 0x8c, 0xc3, 0x49, 0x4f, 0xf4, 0x12, 0x54, 0x23, 0x7b, 0xec, 0x19, 0x8e, 0x3c, + 0x2d, 0x24, 0xa5, 0x22, 0xe6, 0xe4, 0xc9, 0xc0, 0xd2, 0xf1, 0x7b, 0x80, 0xfa, 0x24, 0xa2, 0xa1, + 0x7f, 0x96, 0x4b, 0x9f, 0x6b, 0x50, 0x79, 0xec, 0x87, 0xa6, 0xd8, 0x88, 0x35, 0x2c, 0x08, 0xb6, + 0xa9, 0x66, 0x40, 0x24, 0xf6, 0x4d, 0x40, 0x3b, 0x1e, 0x8b, 0x29, 0xf9, 0x0c, 0xf1, 0xfb, 0x22, + 0x5c, 0x9d, 0xe9, 0x2f, 0x8d, 0xb1, 0xf8, 0x3e, 0x64, 0x07, 0x53, 0x1c, 0x89, 0x7d, 0x88, 0xf6, + 0xa1, 0x2a, 0x7a, 0xc8, 0x95, 0xbc, 0x3d, 0x07, 0x90, 0x08, 0x53, 0x12, 0x4e, 0xc2, 0x5c, 0xea, + 0xf4, 0xa5, 0xef, 0xd7, 0xe9, 0x9f, 0x82, 0x92, 0xcc, 0x23, 0x7a, 0xae, 0x6d, 0x3e, 0x85, 0xab, + 0xa6, 0xef, 0x38, 0xc4, 0x64, 0xde, 0xa0, 0xdb, 0x1e, 0x25, 0xe1, 0x89, 0xe1, 0x3c, 0xdf, 0x6f, + 0xd0, 0x54, 0x6a, 0x47, 0x0a, 0xa9, 0x3f, 0x86, 0x2b, 0x99, 0x81, 0xa5, 0x21, 0xee, 0x43, 0x25, + 0x62, 0x0c, 0x69, 0x89, 0x77, 0xe6, 0xb4, 0x44, 0x84, 0x85, 0xb8, 0x7a, 0x55, 0x80, 0x6b, 0x27, + 0xc4, 0x4b, 0xa7, 0xa5, 0xf6, 0xe1, 0xca, 0x90, 0xbb, 0x69, 0x2e, 0x3f, 0x9c, 0xba, 0x78, 0x71, + 0xc6, 0xc5, 0xaf, 0x01, 0xca, 0xa2, 0x48, 0x47, 0x3c, 0x83, 0x35, 0xed, 0x94, 0x98, 0xb9, 0x90, + 0x5b, 0xb0, 0x6c, 0xfa, 0xae, 0x6b, 0x78, 0x56, 0xab, 0x78, 0xa3, 0xb4, 0x5e, 0xc7, 0x09, 0x99, + 0xdd, 0x8b, 0xa5, 0xbc, 0x7b, 0x51, 0xfd, 0x5d, 0x01, 0x94, 0xe9, 0xd8, 0x72, 0x21, 0x99, 0xf6, + 0xd4, 0x62, 0x40, 0x6c, 0xec, 0x15, 0x2c, 0x29, 0xc9, 0x4f, 0x8e, 0x0b, 0xc1, 
0x27, 0x61, 0x98, + 0x39, 0x8e, 0x4a, 0x2f, 0x78, 0x1c, 0xa9, 0xff, 0x2a, 0x00, 0xba, 0x98, 0x74, 0xa1, 0xd7, 0x61, + 0x25, 0x22, 0x9e, 0xa5, 0x8b, 0x65, 0x14, 0x16, 0xae, 0xe1, 0x06, 0xe3, 0x89, 0xf5, 0x8c, 0x10, + 0x82, 0x32, 0x39, 0x25, 0xa6, 0xdc, 0xf9, 0xfc, 0x1f, 0x4d, 0x60, 0xe5, 0x71, 0xa4, 0xdb, 0x91, + 0xef, 0x18, 0x69, 0x76, 0xd2, 0xdc, 0xd4, 0x16, 0x4e, 0xfe, 0x3a, 0xf7, 0x87, 0x3b, 0x09, 0x18, + 0x6e, 0x3c, 0x8e, 0x52, 0x42, 0xed, 0x40, 0x23, 0xd3, 0x86, 0x6a, 0x50, 0x1e, 0xec, 0x0f, 0x34, + 0x65, 0x09, 0x01, 0x54, 0x7b, 0xdb, 0x78, 0x7f, 0x7f, 0x24, 0x22, 0xc0, 0xce, 0x5e, 0x77, 0x4b, + 0x53, 0x8a, 0xea, 0x9f, 0xab, 0x00, 0xd3, 0x50, 0x8c, 0x9a, 0x50, 0x4c, 0x2d, 0x5d, 0xb4, 0x2d, + 0x36, 0x19, 0xcf, 0x70, 0x89, 0xf4, 0x1e, 0xfe, 0x8f, 0x36, 0xe1, 0xba, 0x1b, 0x8d, 0x03, 0xc3, + 0x3c, 0xd6, 0x65, 0x04, 0x35, 0xb9, 0x30, 0x9f, 0xd5, 0x0a, 0xbe, 0x2a, 0x1b, 0xa5, 0xd6, 0x02, + 0x77, 0x17, 0x4a, 0xc4, 0x3b, 0x69, 0x95, 0x79, 0xa6, 0x79, 0x6f, 0xee, 0x14, 0xa1, 0xa3, 0x79, + 0x27, 0x22, 0xb3, 0x64, 0x30, 0x48, 0x07, 0xb0, 0xc8, 0x89, 0x6d, 0x12, 0x9d, 0x81, 0x56, 0x38, + 0xe8, 0xc7, 0xf3, 0x83, 0xf6, 0x39, 0x46, 0x0a, 0x5d, 0xb7, 0x12, 0x1a, 0x0d, 0xa0, 0x1e, 0x92, + 0xc8, 0x8f, 0x43, 0x93, 0x44, 0xad, 0xea, 0x5c, 0xbb, 0x18, 0x27, 0x72, 0x78, 0x0a, 0x81, 0xfa, + 0x50, 0x75, 0xfd, 0xd8, 0xa3, 0x51, 0x6b, 0x99, 0x2b, 0xfb, 0x76, 0x4e, 0xb0, 0x3d, 0x26, 0x84, + 0xa5, 0x2c, 0xda, 0x82, 0x65, 0xa1, 0x62, 0xd4, 0xaa, 0x71, 0x98, 0x9b, 0x79, 0x1d, 0x88, 0x4b, + 0xe1, 0x44, 0x9a, 0x59, 0x35, 0x8e, 0x48, 0xd8, 0xaa, 0x0b, 0xab, 0xb2, 0x7f, 0xf4, 0x2a, 0xd4, + 0x0d, 0xc7, 0xf1, 0x4d, 0xdd, 0xb2, 0xc3, 0x16, 0xf0, 0x86, 0x1a, 0x67, 0xf4, 0xed, 0x10, 0xbd, + 0x06, 0x0d, 0xb1, 0xf5, 0xf4, 0xc0, 0xa0, 0x93, 0x56, 0x83, 0x37, 0x83, 0x60, 0x1d, 0x18, 0x74, + 0x22, 0x3b, 0x90, 0x30, 0x14, 0x1d, 0x56, 0xd2, 0x0e, 0x24, 0x0c, 0x79, 0x87, 0xff, 0x83, 0x35, + 0x7e, 0x8e, 0x8c, 0x43, 0x3f, 0x0e, 0x74, 0xee, 0x53, 0xab, 0xbc, 0xd3, 0x2a, 0x63, 0x6f, 0x31, + 0xee, 0x80, 0x39, 0xd7, 0x2b, 0x50, 0x7b, 0xe2, 0x1f, 0x89, 0x0e, 0x4d, 0xde, 0x61, 0xf9, 0x89, + 0x7f, 0x94, 0x34, 0x09, 0x0d, 0x6d, 0xab, 0xb5, 0x26, 0x9a, 0x38, 0xbd, 0x63, 0xb5, 0x6f, 0x41, + 0x2d, 0x31, 0xe3, 0x25, 0xd9, 0xfc, 0xb5, 0x6c, 0x36, 0x5f, 0xcf, 0xa4, 0xe6, 0xed, 0xf7, 0xa1, + 0x39, 0xeb, 0x04, 0xf3, 0x48, 0xab, 0x7f, 0x2b, 0x40, 0x3d, 0x35, 0x37, 0xf2, 0xe0, 0x2a, 0x57, + 0xc7, 0xa0, 0xc4, 0xd2, 0xa7, 0xde, 0x23, 0x62, 0xc0, 0x07, 0x39, 0x2d, 0xd5, 0x4d, 0x10, 0xe4, + 0x39, 0x28, 0x5d, 0x09, 0xa5, 0xc8, 0xd3, 0xf1, 0xbe, 0x80, 0x35, 0xc7, 0xf6, 0xe2, 0xd3, 0xcc, + 0x58, 0x22, 0x84, 0xfd, 0x7f, 0xce, 0xb1, 0x76, 0x99, 0xf4, 0x74, 0x8c, 0xa6, 0x33, 0x43, 0xab, + 0x5f, 0x17, 0xe1, 0xa5, 0xcb, 0xd5, 0x41, 0x03, 0x28, 0x99, 0x41, 0x2c, 0xa7, 0xf6, 0xfe, 0xbc, + 0x53, 0xeb, 0x05, 0xf1, 0x74, 0x54, 0x06, 0xc4, 0x92, 0x7c, 0x97, 0xb8, 0x7e, 0x78, 0x26, 0x67, + 0xf0, 0xd1, 0xbc, 0x90, 0x7b, 0x5c, 0x7a, 0x8a, 0x2a, 0xe1, 0x10, 0x86, 0x9a, 0x4c, 0x15, 0x22, + 0x79, 0x4c, 0xcc, 0x99, 0x72, 0x24, 0x90, 0x38, 0xc5, 0x51, 0x6f, 0xc1, 0xf5, 0x4b, 0xa7, 0x82, + 0xfe, 0x07, 0xc0, 0x0c, 0x62, 0x9d, 0x5f, 0x09, 0x85, 0xdd, 0x4b, 0xb8, 0x6e, 0x06, 0xf1, 0x90, + 0x33, 0xd4, 0xdb, 0xd0, 0x7a, 0x96, 0xbe, 0x6c, 0xf3, 0x09, 0x8d, 0x75, 0xf7, 0x88, 0xaf, 0x41, + 0x09, 0xd7, 0x04, 0x63, 0xef, 0x48, 0xfd, 0xa6, 0x08, 0x6b, 0xe7, 0xd4, 0x61, 0x11, 0x50, 0x6c, + 0xe6, 0x24, 0x2a, 0x0b, 0x8a, 0xed, 0x6c, 0xd3, 0xb6, 0x92, 0x34, 0x9a, 0xff, 0xf3, 0x33, 0x3d, + 0x90, 0x29, 0x6e, 0xd1, 0x0e, 0x98, 0x43, 0xbb, 0x47, 0x36, 0x8d, 0xf8, 0xcd, 0xa3, 0x82, 0x05, + 0x81, 
0x1e, 0x41, 0x33, 0x24, 0x11, 0x09, 0x4f, 0x88, 0xa5, 0x07, 0x7e, 0x48, 0x93, 0x05, 0xdb, + 0x9c, 0x6f, 0xc1, 0x0e, 0xfc, 0x90, 0xe2, 0xd5, 0x04, 0x89, 0x51, 0x11, 0x7a, 0x08, 0xab, 0xd6, + 0x99, 0x67, 0xb8, 0xb6, 0x29, 0x91, 0xab, 0x0b, 0x23, 0xaf, 0x48, 0x20, 0x0e, 0xcc, 0x6e, 0xd6, + 0x99, 0x46, 0x36, 0x31, 0xc7, 0x38, 0x22, 0x8e, 0x5c, 0x13, 0x41, 0xcc, 0xee, 0xdf, 0x8a, 0xdc, + 0xbf, 0xea, 0x1f, 0x8b, 0xd0, 0x9c, 0xdd, 0x00, 0x89, 0xfd, 0x02, 0x12, 0xda, 0xbe, 0x95, 0xb1, + 0xdf, 0x01, 0x67, 0x30, 0x1b, 0xb1, 0xe6, 0x2f, 0x63, 0x9f, 0x1a, 0x89, 0x8d, 0xcc, 0x20, 0xfe, + 0x21, 0xa3, 0xcf, 0xd9, 0xbe, 0x74, 0xce, 0xf6, 0xe8, 0x6d, 0x40, 0xd2, 0xbe, 0x8e, 0xed, 0xda, + 0x54, 0x3f, 0x3a, 0xa3, 0x44, 0xac, 0x7f, 0x09, 0x2b, 0xa2, 0x65, 0x97, 0x35, 0x7c, 0xc2, 0xf8, + 0x48, 0x85, 0x55, 0xdf, 0x77, 0xf5, 0xc8, 0xf4, 0x43, 0xa2, 0x1b, 0xd6, 0x93, 0x56, 0x85, 0x77, + 0x6c, 0xf8, 0xbe, 0x3b, 0x64, 0xbc, 0xae, 0xf5, 0x84, 0x1d, 0xb8, 0x66, 0x10, 0x47, 0x84, 0xea, + 0xec, 0xc3, 0x63, 0x54, 0x1d, 0x83, 0x60, 0xf5, 0x82, 0x38, 0xca, 0x74, 0x70, 0x89, 0xcb, 0xe2, + 0x4e, 0xa6, 0xc3, 0x1e, 0x71, 0xd9, 0x28, 0x2b, 0x07, 0x24, 0x34, 0x89, 0x47, 0x47, 0xb6, 0x79, + 0xcc, 0x42, 0x4a, 0x61, 0xbd, 0x80, 0x67, 0x78, 0xea, 0xe7, 0x50, 0xe1, 0x21, 0x88, 0x4d, 0x9e, + 0x1f, 0xdf, 0xfc, 0x74, 0x17, 0xcb, 0x5b, 0x63, 0x0c, 0x7e, 0xb6, 0xbf, 0x0a, 0xf5, 0x89, 0x1f, + 0xc9, 0xd8, 0x20, 0x3c, 0xaf, 0xc6, 0x18, 0xbc, 0xb1, 0x0d, 0xb5, 0x90, 0x18, 0x96, 0xef, 0x39, + 0x67, 0x7c, 0x5d, 0x6a, 0x38, 0xa5, 0xd5, 0x2f, 0xa1, 0x2a, 0x8e, 0xdf, 0x17, 0xc0, 0xbf, 0x09, + 0xc8, 0x14, 0x41, 0x25, 0x20, 0xa1, 0x6b, 0x47, 0x91, 0xed, 0x7b, 0x51, 0xf2, 0xfc, 0x23, 0x5a, + 0x0e, 0xa6, 0x0d, 0xea, 0xdf, 0x0b, 0x22, 0xdf, 0x11, 0x17, 0x73, 0x96, 0xc5, 0x32, 0x4f, 0x63, + 0x39, 0x59, 0x81, 0xbb, 0x47, 0x42, 0xb2, 0x5c, 0x52, 0xa6, 0x35, 0xc5, 0x45, 0xdf, 0x35, 0x24, + 0x40, 0x72, 0x1f, 0x20, 0x32, 0xed, 0x9b, 0xf7, 0x3e, 0x40, 0xc4, 0x7d, 0x80, 0xb0, 0xe4, 0x53, + 0x26, 0x5c, 0x02, 0xae, 0xcc, 0xf3, 0xad, 0x86, 0x95, 0x5e, 0xba, 0x88, 0xfa, 0xef, 0x42, 0x7a, + 0x56, 0x24, 0x97, 0x23, 0xf4, 0x05, 0xd4, 0xd8, 0xb6, 0xd3, 0x5d, 0x23, 0x90, 0x4f, 0x7d, 0xbd, + 0xc5, 0xee, 0x5d, 0x1d, 0xb6, 0xcb, 0xf6, 0x8c, 0x40, 0xa4, 0x4b, 0xcb, 0x81, 0xa0, 0xd8, 0x99, + 0x63, 0x58, 0xd3, 0x33, 0x87, 0xfd, 0xa3, 0x37, 0xa0, 0x69, 0xc4, 0xd4, 0xd7, 0x0d, 0xeb, 0x84, + 0x84, 0xd4, 0x8e, 0x88, 0xb4, 0xfd, 0x2a, 0xe3, 0x76, 0x13, 0x66, 0xfb, 0x1e, 0xac, 0x64, 0x31, + 0x9f, 0x17, 0x7d, 0x2b, 0xd9, 0xe8, 0xfb, 0x53, 0x80, 0x69, 0xde, 0xce, 0x7c, 0x84, 0x9c, 0xda, + 0x54, 0x37, 0x7d, 0x8b, 0x48, 0x53, 0xd6, 0x18, 0xa3, 0xe7, 0x5b, 0xe4, 0xdc, 0x2d, 0xa8, 0x92, + 0xdc, 0x82, 0xd8, 0xae, 0x65, 0x1b, 0xed, 0xd8, 0x76, 0x1c, 0x62, 0x49, 0x0d, 0xeb, 0xbe, 0xef, + 0x3e, 0xe0, 0x0c, 0xf5, 0xdb, 0xa2, 0xf0, 0x15, 0x71, 0x9f, 0xcd, 0x95, 0x1b, 0x7f, 0x5f, 0xa6, + 0xbe, 0x0b, 0x10, 0x51, 0x23, 0x64, 0xa9, 0x84, 0x41, 0xe5, 0x13, 0x51, 0xfb, 0xc2, 0x35, 0x6a, + 0x94, 0x3c, 0xcb, 0xe3, 0xba, 0xec, 0xdd, 0xa5, 0xe8, 0x03, 0x58, 0x31, 0x7d, 0x37, 0x70, 0x88, + 0x14, 0xae, 0x3c, 0x57, 0xb8, 0x91, 0xf6, 0xef, 0xd2, 0xcc, 0x1d, 0xaa, 0xfa, 0xa2, 0x77, 0xa8, + 0xbf, 0x14, 0xc4, 0xb5, 0x3c, 0xfb, 0x2a, 0x80, 0xc6, 0x97, 0x3c, 0x3d, 0x6f, 0x2d, 0xf8, 0xc4, + 0xf0, 0x5d, 0xef, 0xce, 0xed, 0x0f, 0xf2, 0x3c, 0xf4, 0x3e, 0x3b, 0xb9, 0xfb, 0x6b, 0x09, 0xea, + 0xe9, 0x8d, 0xfc, 0x82, 0xed, 0xef, 0x40, 0x3d, 0xad, 0x89, 0xc8, 0x03, 0xe2, 0x3b, 0xcd, 0x93, + 0x76, 0x46, 0x8f, 0x01, 0x19, 0xe3, 0x71, 0x9a, 0xb4, 0xe9, 0x71, 0x64, 0x8c, 0x93, 0xf7, 0x90, + 0x3b, 0x73, 0xac, 0x43, 0x12, 
0xb7, 0x0e, 0x99, 0x3c, 0x56, 0x8c, 0xf1, 0x78, 0x86, 0x83, 0x7e, + 0x06, 0xd7, 0x67, 0xc7, 0xd0, 0x8f, 0xce, 0xf4, 0xc0, 0xb6, 0xe4, 0x1d, 0x6c, 0x7b, 0xde, 0x47, + 0x89, 0xce, 0x0c, 0xfc, 0x27, 0x67, 0x07, 0xb6, 0x25, 0xd6, 0x1c, 0x85, 0x17, 0x1a, 0xda, 0xbf, + 0x80, 0x97, 0x9f, 0xd1, 0xfd, 0x12, 0x1b, 0x0c, 0x66, 0x1f, 0xdb, 0x17, 0x5f, 0x84, 0x8c, 0xf5, + 0xfe, 0x50, 0x10, 0x6f, 0x27, 0xb3, 0x6b, 0xd2, 0xcd, 0xe6, 0xad, 0x1b, 0x39, 0xc7, 0xe9, 0x1d, + 0x1c, 0x0a, 0x78, 0x9e, 0xaa, 0x7e, 0x7a, 0x2e, 0x55, 0xcd, 0x9b, 0xc4, 0x88, 0x8c, 0x4f, 0x00, + 0x49, 0x04, 0xf5, 0x4f, 0x25, 0xa8, 0x25, 0xe8, 0xfc, 0x06, 0x75, 0x16, 0x51, 0xe2, 0xea, 0x6e, + 0x72, 0x84, 0x15, 0x30, 0x08, 0xd6, 0x1e, 0x3b, 0xc4, 0x5e, 0x85, 0x3a, 0xbb, 0xa8, 0x89, 0xe6, + 0x22, 0x6f, 0xae, 0x31, 0x06, 0x6f, 0x7c, 0x0d, 0x1a, 0xd4, 0xa7, 0x86, 0xa3, 0x53, 0x1e, 0xcb, + 0x4b, 0x42, 0x9a, 0xb3, 0x78, 0x24, 0x47, 0x6f, 0xc1, 0x15, 0x3a, 0x09, 0x7d, 0x4a, 0x1d, 0x96, + 0xdf, 0xf1, 0x8c, 0x46, 0x24, 0x20, 0x65, 0xac, 0xa4, 0x0d, 0x22, 0xd3, 0x89, 0xd8, 0xe9, 0x3d, + 0xed, 0xcc, 0x5c, 0x97, 0x1f, 0x22, 0x65, 0xbc, 0x9a, 0x72, 0x99, 0x6b, 0xb3, 0xe0, 0x19, 0x88, + 0x6c, 0x81, 0x9f, 0x15, 0x05, 0x9c, 0x90, 0x48, 0x87, 0x35, 0x97, 0x18, 0x51, 0x1c, 0x12, 0x4b, + 0x7f, 0x6c, 0x13, 0xc7, 0x12, 0x17, 0xdf, 0x66, 0xee, 0xf4, 0x3b, 0x59, 0x96, 0xce, 0x7d, 0x2e, + 0x8d, 0x9b, 0x09, 0x9c, 0xa0, 0x59, 0xe6, 0x20, 0xfe, 0xd0, 0x1a, 0x34, 0x86, 0x8f, 0x86, 0x23, + 0x6d, 0x4f, 0xdf, 0xdb, 0xef, 0x6b, 0xb2, 0x9e, 0x32, 0xd4, 0xb0, 0x20, 0x0b, 0xac, 0x7d, 0xb4, + 0x3f, 0xea, 0xee, 0xea, 0xa3, 0x9d, 0xde, 0x83, 0xa1, 0x52, 0x44, 0xd7, 0xe1, 0xca, 0x68, 0x1b, + 0xef, 0x8f, 0x46, 0xbb, 0x5a, 0x5f, 0x3f, 0xd0, 0xf0, 0xce, 0x7e, 0x7f, 0xa8, 0x94, 0x10, 0x82, + 0xe6, 0x94, 0x3d, 0xda, 0xd9, 0xd3, 0x94, 0x32, 0x6a, 0xc0, 0xf2, 0x81, 0x86, 0x7b, 0xda, 0x60, + 0xa4, 0x54, 0xd4, 0x6f, 0x4a, 0xd0, 0xc8, 0x58, 0x91, 0x39, 0x72, 0x18, 0x89, 0x3c, 0xbf, 0x8c, + 0xd9, 0x2f, 0x3b, 0x4c, 0x4c, 0xc3, 0x9c, 0x08, 0xeb, 0x94, 0xb1, 0x20, 0x78, 0x6e, 0x6f, 0x9c, + 0x66, 0xf6, 0x79, 0x19, 0xd7, 0x5c, 0xe3, 0x54, 0x80, 0xbc, 0x0e, 0x2b, 0xc7, 0x24, 0xf4, 0x88, + 0x23, 0xdb, 0x85, 0x45, 0x1a, 0x82, 0x27, 0xba, 0xac, 0x83, 0x22, 0xbb, 0x4c, 0x61, 0x84, 0x39, + 0x9a, 0x82, 0xbf, 0x97, 0x80, 0x5d, 0x83, 0x8a, 0x68, 0x5e, 0x16, 0xe3, 0x73, 0x82, 0x85, 0xa9, + 0xe8, 0xa9, 0x11, 0xf0, 0xfc, 0xae, 0x8c, 0xf9, 0x3f, 0x3a, 0xba, 0x68, 0x9f, 0x2a, 0xb7, 0xcf, + 0xdd, 0xf9, 0xdd, 0xf9, 0x59, 0x26, 0x9a, 0xa4, 0x26, 0x5a, 0x86, 0x12, 0x4e, 0x8a, 0x10, 0xbd, + 0x6e, 0x6f, 0x9b, 0x99, 0x65, 0x15, 0xea, 0x7b, 0xdd, 0xcf, 0xf4, 0xc3, 0x21, 0x7f, 0x86, 0x42, + 0x0a, 0xac, 0x3c, 0xd0, 0xf0, 0x40, 0xdb, 0x95, 0x9c, 0x12, 0xba, 0x06, 0x8a, 0xe4, 0x4c, 0xfb, + 0x95, 0x19, 0x82, 0xf8, 0xad, 0xa0, 0x1a, 0x94, 0x87, 0x0f, 0xbb, 0x07, 0x4a, 0x55, 0xfd, 0x47, + 0x11, 0xd6, 0x44, 0x58, 0x48, 0x9f, 0x4b, 0x9f, 0xfd, 0x6e, 0x99, 0x7d, 0x45, 0x28, 0xce, 0xbc, + 0x22, 0xa4, 0x49, 0x28, 0x8f, 0xea, 0xa5, 0x69, 0x12, 0xca, 0x5f, 0x1f, 0x66, 0x4e, 0xfc, 0xf2, + 0x3c, 0x27, 0x7e, 0x0b, 0x96, 0x5d, 0x12, 0xa5, 0x76, 0xab, 0xe3, 0x84, 0x44, 0x36, 0x34, 0x0c, + 0xcf, 0xf3, 0x29, 0x7f, 0xab, 0x4b, 0xae, 0x45, 0x5b, 0x73, 0xbd, 0x0a, 0xa6, 0x33, 0xee, 0x74, + 0xa7, 0x48, 0xe2, 0x60, 0xce, 0x62, 0xb7, 0x3f, 0x04, 0xe5, 0x7c, 0x87, 0x79, 0xc2, 0xe1, 0x9b, + 0xef, 0x4e, 0xa3, 0x21, 0x61, 0xfb, 0xe2, 0x70, 0xf0, 0x60, 0xb0, 0xff, 0x70, 0xa0, 0x2c, 0x31, + 0x02, 0x1f, 0x0e, 0x06, 0x3b, 0x83, 0x2d, 0xa5, 0x80, 0x00, 0xaa, 0xda, 0x67, 0x3b, 0x23, 0xad, + 0xaf, 0x14, 0x37, 0xff, 0xb9, 0x0a, 0x55, 0xa1, 0x24, 
0xfa, 0x5a, 0x66, 0x02, 0xd9, 0x52, 0x3c, + 0xfa, 0x70, 0xee, 0x8c, 0x7a, 0xa6, 0xbc, 0xdf, 0xfe, 0x68, 0x61, 0x79, 0xf9, 0xdc, 0xbd, 0x84, + 0x7e, 0x5b, 0x80, 0x95, 0x99, 0xf7, 0xdd, 0xbc, 0x4f, 0x93, 0x97, 0x54, 0xfe, 0xdb, 0x3f, 0x58, + 0x48, 0x36, 0xd5, 0xe5, 0x37, 0x05, 0x68, 0x64, 0x6a, 0xde, 0xe8, 0xee, 0x22, 0x75, 0x72, 0xa1, + 0xc9, 0xbd, 0xc5, 0x4b, 0xec, 0xea, 0xd2, 0x3b, 0x05, 0xf4, 0xeb, 0x02, 0x34, 0x32, 0xd5, 0xdf, + 0xdc, 0xaa, 0x5c, 0xac, 0x55, 0xe7, 0x56, 0xe5, 0xb2, 0x62, 0xf3, 0x12, 0xfa, 0x65, 0x01, 0xea, + 0x69, 0x25, 0x17, 0xdd, 0x9e, 0xbf, 0xf6, 0x2b, 0x94, 0xb8, 0xb3, 0x68, 0xd1, 0x58, 0x5d, 0x42, + 0x3f, 0x87, 0x5a, 0x52, 0xf6, 0x44, 0x79, 0xa3, 0xd7, 0xb9, 0x9a, 0x6a, 0xfb, 0xf6, 0xdc, 0x72, + 0xd9, 0xe1, 0x93, 0x5a, 0x64, 0xee, 0xe1, 0xcf, 0x55, 0x4d, 0xdb, 0xb7, 0xe7, 0x96, 0x4b, 0x87, + 0x67, 0x9e, 0x90, 0x29, 0x59, 0xe6, 0xf6, 0x84, 0x8b, 0xb5, 0xd2, 0xdc, 0x9e, 0x70, 0x59, 0x85, + 0x54, 0x28, 0x92, 0x29, 0x7a, 0xe6, 0x56, 0xe4, 0x62, 0x61, 0x35, 0xb7, 0x22, 0x97, 0xd4, 0x58, + 0xd5, 0x25, 0xf4, 0x55, 0x21, 0x7b, 0x2f, 0xb8, 0x3d, 0x77, 0x6d, 0x6f, 0x4e, 0x97, 0xbc, 0x50, + 0x5d, 0xe4, 0x1b, 0xf4, 0x2b, 0xf9, 0x8a, 0x21, 0x4a, 0x83, 0x68, 0x1e, 0xb0, 0x99, 0x6a, 0x62, + 0xfb, 0xd6, 0x62, 0xc1, 0x86, 0x2b, 0xf1, 0xab, 0x02, 0xc0, 0xb4, 0x88, 0x98, 0x5b, 0x89, 0x0b, + 0xd5, 0xcb, 0xf6, 0xdd, 0x05, 0x24, 0xb3, 0x1b, 0x24, 0xa9, 0x1b, 0xe6, 0xde, 0x20, 0xe7, 0x8a, + 0x9c, 0xb9, 0x37, 0xc8, 0xf9, 0x02, 0xa5, 0xba, 0xf4, 0xc9, 0xf2, 0x8f, 0x2a, 0x22, 0xfa, 0x57, + 0xf9, 0xe7, 0xbd, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xde, 0xd5, 0xff, 0xa7, 0x27, 0x00, + 0x00, +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto new file mode 100644 index 0000000..ed380aa --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/proto/driver.proto @@ -0,0 +1,606 @@ +syntax = "proto3"; +package hashicorp.nomad.plugins.drivers.proto; +option go_package = "proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +import "github.com/hashicorp/nomad/plugins/shared/hclspec/hcl_spec.proto"; +import "github.com/hashicorp/nomad/plugins/shared/structs/proto/attribute.proto"; + +// Driver service defines RPCs used to communicate with a nomad runtime driver. +// Some rpcs may not be implemented by the driver based on it's capabilities. +service Driver { + + // TaskConfigSchema returns the schema for parsing the driver + // configuration of a task. + rpc TaskConfigSchema(TaskConfigSchemaRequest) returns (TaskConfigSchemaResponse) {} + + // Capabilities returns a set of features which the driver implements. Some + // RPCs are not possible to implement on some runtimes, this allows the + // driver to indicate if it doesn't support these RPCs and features. + rpc Capabilities(CapabilitiesRequest) returns (CapabilitiesResponse) {} + + // Fingerprint starts a stream which emits information about the driver + // including whether the driver healthy and able to function in the + // existing environment. + // + // The driver should immediately stream a FingerprintResponse when the RPC + // is initially called, then send any additional responses if there is a + // change in the driver's state. + rpc Fingerprint(FingerprintRequest) returns (stream FingerprintResponse) {} + + // RecoverTask is used when a task has been started but the driver may not + // know about it. Such is the case if the driver restarts or is upgraded. 
+ rpc RecoverTask(RecoverTaskRequest) returns (RecoverTaskResponse) {} + + // StartTask starts and tracks the task on the implemented runtime + rpc StartTask(StartTaskRequest) returns (StartTaskResponse) {} + + // WaitTask blocks until the given task exits, returning the result of the + // task. It may be called after the task has exited, but before the task is + // destroyed. + rpc WaitTask(WaitTaskRequest) returns (WaitTaskResponse) {} + + // StopTask stops a given task by sending the desired signal to the process. + // If the task does not exit on its own within the given timeout, it will be + // forcefully killed. + rpc StopTask(StopTaskRequest) returns (StopTaskResponse) {} + + // DestroyTask removes the task from the driver's internal state and cleans + // up any additional resources created by the driver. It cannot be called + // on a running task, unless force is set to true. + rpc DestroyTask(DestroyTaskRequest) returns (DestroyTaskResponse) {} + + // InspectTask returns detailed information for the given task + rpc InspectTask(InspectTaskRequest) returns (InspectTaskResponse) {} + + // TaskStats collects and returns runtime metrics for the given task + rpc TaskStats(TaskStatsRequest) returns (stream TaskStatsResponse) {} + + // TaskEvents starts a streaming RPC where all task events emitted by the + // driver are streamed to the caller. + rpc TaskEvents(TaskEventsRequest) returns (stream DriverTaskEvent) {} + + // The following RPCs are only implemented if the driver sets the + // corresponding capability. + + // SignalTask sends a signal to the task + rpc SignalTask(SignalTaskRequest) returns (SignalTaskResponse) {} + + // ExecTask executes a command inside the task's execution context + rpc ExecTask(ExecTaskRequest) returns (ExecTaskResponse) {} +} + +message TaskConfigSchemaRequest {} + +message TaskConfigSchemaResponse { + + // Spec is the configuration schema for the job driver config stanza + hashicorp.nomad.plugins.shared.hclspec.Spec spec = 1; +} + +message CapabilitiesRequest {} + +message CapabilitiesResponse { + + // Capabilities provides a way for the driver to denote if it implements + // non-core RPCs. Some Driver service RPCs expose additional information + // or functionality outside of the core task management functions. These + // RPCs are only implemented if the driver sets the corresponding capability. + DriverCapabilities capabilities = 1; +} + +message FingerprintRequest {} + +message FingerprintResponse { + + + // Attributes are key/value pairs that annotate the nomad client and can be + // used in scheduling constraints and affinities. + map<string, hashicorp.nomad.plugins.shared.structs.Attribute> attributes = 1; + + enum HealthState { + UNDETECTED = 0; + UNHEALTHY = 1; + HEALTHY = 2; + + } + + // Health is used to determine the state of health the driver is in.
+ // Health can be one of the following states: + // * UNDETECTED: driver dependencies are not met and the driver can not start + // * UNHEALTHY: driver dependencies are met but the driver is unable to + // perform operations due to some other problem + // * HEALTHY: driver is able to perform all operations + HealthState health = 2; + + // HealthDescription is a human readable message describing the current + // state of driver health + string health_description = 3; +} + +message RecoverTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Handle is the TaskHandle returned from StartTask + TaskHandle handle = 2; +} + +message RecoverTaskResponse {} + +message StartTaskRequest { + + // Task configuration to launch + TaskConfig task = 1; + +} + +message StartTaskResponse { + + enum Result { + SUCCESS = 0; + RETRY = 1; + FATAL = 2; + } + + // Result is set depending on the type of error that occurred while starting + // a task: + // + // * SUCCESS: No error occurred, handle is set + // * RETRY: An error occurred, but is recoverable and the RPC should be retried + // * FATAL: A fatal error occurred and is not likely to succeed if retried + // + // If Result is not successful, the DriverErrorMsg will be set. + Result result = 1; + + // DriverErrorMsg is set if an error occurred + string driver_error_msg = 2; + + // Handle is opaque to the client, but must be stored in order to recover + // the task. + TaskHandle handle = 3; + + // NetworkOverride is set if the driver sets network settings and the service ip/port + // needs to be set differently. + NetworkOverride network_override = 4; +} + +message WaitTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; +} + +message WaitTaskResponse { + + // Result is the exit status of the task + ExitResult result = 1; + // Err is set if any driver error occurred while waiting for the task + string err = 2; +} + +message StopTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Timeout defines the amount of time to wait before forcefully killing + // the task. For example, on Unix clients, this means sending a SIGKILL to + // the process. + google.protobuf.Duration timeout = 2; + + // Signal can be set to override the Task's configured shutdown signal + string signal = 3; +} + +message StopTaskResponse {} + +message DestroyTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Force destroys the task even if it is still in a running state + bool force = 2; +} + +message DestroyTaskResponse {} + +message InspectTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; +} + +message InspectTaskResponse { + + // Task details + TaskStatus task = 1; + + // Driver details for task + TaskDriverStatus driver = 2; + + // NetworkOverride info if set + NetworkOverride network_override = 3; +} + +message TaskStatsRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // CollectionInterval is the interval at which to stream stats to the caller + google.protobuf.Duration collection_interval = 2; +} + +message TaskStatsResponse { + + // Stats for the task + TaskStats stats = 1; +} + +message TaskEventsRequest {} + +message SignalTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Signal is the operating system signal to send to the task. 
Ex: SIGHUP + string signal = 2; +} + +message SignalTaskResponse {} + +message ExecTaskRequest { + + // TaskId is the ID of the target task + string task_id = 1; + + // Command is the command to execute in the task environment + repeated string command = 2; + + // Timeout is the amount of time to wait for the command to stop. + // Defaults to 0 (run forever) + google.protobuf.Duration timeout = 3; +} + +message ExecTaskResponse { + + // Stdout from the exec + bytes stdout = 1; + + // Stderr from the exec + bytes stderr = 2; + + // Result from the exec + ExitResult result = 3; +} + +message DriverCapabilities { + + // SendSignals indicates that the driver can send process signals (ex. SIGUSR1) + // to the task. + bool send_signals = 1; + + // Exec indicates that the driver supports executing arbitrary commands + // in the task's execution environment. + bool exec = 2; + + enum FSIsolation { + NONE = 0; + CHROOT = 1; + IMAGE = 2; + } + // FsIsolation indicates what kind of filesystem isolation a driver supports. + FSIsolation fs_isolation = 3; +} + +message TaskConfig { + + // Id of the task, recommended to the globally unique, must be unique to the driver. + string id = 1; + + // Name of the task + string name = 2; + + // MsgpackDriverConfig is the encoded driver configuation of the task + bytes msgpack_driver_config = 3; + + // Env is the a set of key/value pairs to be set as environment variables + map env = 4; + + // DeviceEnv is the set of environment variables that are defined by device + // plugins. This allows the driver to differentiate environment variables + // set by the device plugins and those by the user. When populating the + // task's environment env should be used. + map device_env = 5; + + // Resources defines the resources to isolate + Resources resources = 6; + + // Mounts is a list of targets to bind mount into the task directory + repeated Mount mounts = 7; + + // Devices is a list of system devices to mount into the task's execution + // environment. + repeated Device devices = 8; + + // User defines the operating system user the tasks should run as + string user = 9; + + // AllocDir is the directory on the host where the allocation directory + // exists. 
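Stepping back to the DriverCapabilities message defined a few lines above: on the Go side these flags are returned from the driver's Capabilities method, which the gRPC server later in this diff reads as caps.SendSignals, caps.Exec and caps.FSIsolation. Below is a minimal sketch of such a method, assuming a drivers.Capabilities struct that mirrors the message; ExampleDriver is a hypothetical type used only for illustration.

```go
package exampledriver

import "github.com/hashicorp/nomad/plugins/drivers"

// ExampleDriver is a hypothetical driver type used only for illustration.
type ExampleDriver struct{}

// Capabilities advertises which of the optional RPCs above this driver
// implements and what level of filesystem isolation it provides.
// drivers.Capabilities is assumed to mirror the DriverCapabilities message.
func (d *ExampleDriver) Capabilities() (*drivers.Capabilities, error) {
	return &drivers.Capabilities{
		SendSignals: true,                    // SignalTask is supported
		Exec:        false,                   // ExecTask is not supported
		FSIsolation: drivers.FSIsolationNone, // no filesystem isolation
	}, nil
}
```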
+ string alloc_dir = 10; + + // StdoutPath is the path to the file to open and write task stdout to + string stdout_path = 11; + + // StderrPath is the path to the file to open and write task stderr to + string stderr_path = 12; + + // TaskGroupName is the name of the task group which this task is a member of + string task_group_name = 13; + + // JobName is the name of the job of which this task is part of + string job_name = 14; + + // AllocId is the ID of the associated allocation + string alloc_id = 15; +} + +message Resources { + + // AllocatedResources are the resources set for the task + AllocatedTaskResources allocated_resources = 1; + + // LinuxResources are the computed values to set for specific Linux features + LinuxResources linux_resources = 2; +} + +message AllocatedTaskResources { + AllocatedCpuResources cpu = 1; + AllocatedMemoryResources memory = 2; + repeated NetworkResource networks = 5; +} + +message AllocatedCpuResources { + int64 cpu_shares = 1; +} + +message AllocatedMemoryResources { + int64 memory_mb = 2; +} + +message NetworkResource { + string device = 1; + string cidr = 2; + string ip = 3; + int32 mbits = 4; + repeated NetworkPort reserved_ports = 5; + repeated NetworkPort dynamic_ports = 6; +} + +message NetworkPort { + string label = 1; + int32 value = 2; +} + +message LinuxResources { + + // CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified) + int64 cpu_period = 1; + // CPU CFS (Completely Fair Scheduler) quota. Default: 0 (not specified) + int64 cpu_quota = 2; + // CPU shares (relative weight vs. other containers). Default: 0 (not specified) + int64 cpu_shares = 3; + // Memory limit in bytes. Default: 0 (not specified) + int64 memory_limit_bytes = 4; + // OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified) + int64 oom_score_adj = 5; + // CpusetCpus constrains the allowed set of logical CPUs. Default: "" (not specified) + string cpuset_cpus = 6; + // CpusetMems constrains the allowed set of memory nodes. Default: "" (not specified) + string cpuset_mems = 7; + // PercentTicks is a compatibility option for docker and should not be used + double PercentTicks = 8; +} + +message Mount { + + // TaskPath is the file path within the task directory to mount to + string task_path = 1; + + // HostPath is the file path on the host to mount from + string host_path = 2; + + // Readonly if set true, mounts the path in readonly mode + bool readonly = 3; +} + +message Device { + + // TaskPath is the file path within the task to mount the device to + string task_path = 1; + + // HostPath is the path on the host to the source device + string host_path = 2; + + // CgroupPermissions defines the Cgroup permissions of the device. + // One or more of the following options can be set: + // * r - allows the task to read from the specified device. + // * w - allows the task to write to the specified device. + // * m - allows the task to create device files that do not yet exist. + // + // Example: "rw" + string cgroup_permissions = 3; +} + +enum TaskState { + UNKNOWN = 0; + RUNNING = 1; + EXITED = 2; +} + +// TaskHandle is created when starting a task and is used to recover task +message TaskHandle { + + // Version is used by the driver to version the DriverState schema. + // Version 0 is reserved by Nomad and should not be used. 
+ int32 version = 1; + + // Config is the TaskConfig for the task + TaskConfig config = 2; + + // State is the state of the task's execution + TaskState state = 3; + + // DriverState is the encoded state for the specific driver + bytes driver_state = 4; +} + +// NetworkOverride contains network settings which the driver may override +// for the task, such as when the driver is setting up the task's network. +message NetworkOverride { + + // PortMap can be set to replace ports with driver-specific mappings + map port_map = 1; + + // Addr is the IP address for the task created by the driver + string addr = 2; + + // AutoAdvertise indicates whether the driver thinks services that choose + // to auto_advertise_addresses should use this IP instead of the host's. + bool auto_advertise = 3; +} + +// ExitResult contains information about the exit status of a task +message ExitResult { + + // ExitCode returned from the task on exit + int32 exit_code = 1; + + // Signal is set if a signal was sent to the task + int32 signal = 2; + + // OomKilled is true if the task exited as a result of the OOM Killer + bool oom_killed = 3; + +} + +// TaskStatus includes information of a specific task +message TaskStatus { + string id = 1; + string name = 2; + + // State is the state of the task's execution + TaskState state = 3; + + // StartedAt is the timestamp when the task was started + google.protobuf.Timestamp started_at = 4; + + // CompletedAt is the timestamp when the task exited. + // If the task is still running, CompletedAt will not be set + google.protobuf.Timestamp completed_at = 5; + + // Result is set when CompletedAt is set. + ExitResult result = 6; +} + +message TaskDriverStatus { + + // Attributes is a set of string/string key value pairs specific to the + // implementing driver + map attributes = 1; +} + +message TaskStats { + + // Id of the task + string id = 1; + + // Timestamp for which the stats were collected + google.protobuf.Timestamp timestamp = 2; + + // AggResourceUsage is the aggreate usage of all processes + TaskResourceUsage agg_resource_usage = 3; + + // ResourceUsageByPid breaks the usage stats by process + map resource_usage_by_pid = 4; +} + +message TaskResourceUsage { + + // CPU usage stats + CPUUsage cpu = 1; + + // Memory usage stats + MemoryUsage memory = 2; +} + +message CPUUsage { + + double system_mode = 1; + double user_mode = 2; + double total_ticks = 3; + uint64 throttled_periods = 4; + uint64 throttled_time = 5; + double percent = 6; + + enum Fields { + SYSTEM_MODE = 0; + USER_MODE = 1; + TOTAL_TICKS = 2; + THROTTLED_PERIODS = 3; + THROTTLED_TIME = 4; + PERCENT = 5; + } + // MeasuredFields indicates which fields were actually sampled + repeated Fields measured_fields = 7; +} + +message MemoryUsage { + uint64 rss = 1; + uint64 cache = 2; + uint64 max_usage = 3; + uint64 kernel_usage = 4; + uint64 kernel_max_usage = 5; + uint64 usage = 7; + uint64 swap = 8; + + enum Fields { + RSS = 0; + CACHE = 1; + MAX_USAGE = 2; + KERNEL_USAGE = 3; + KERNEL_MAX_USAGE = 4; + USAGE = 5; + SWAP = 6; + } + // MeasuredFields indicates which fields were actually sampled + repeated Fields measured_fields = 6; +} + +message DriverTaskEvent { + + // TaskId is the id of the task for the event + string task_id = 1; + + // AllocId of the task for the event + string alloc_id = 2; + + // TaskName is the name of the task for the event + string task_name = 3; + + // Timestamp when the event occurred + google.protobuf.Timestamp timestamp = 4; + + // Message is the body of the event + string message = 5; 
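These DriverTaskEvent fields correspond to the Go-side event values that the gRPC server later in this diff forwards to the client; it reads TaskID, AllocID, TaskName, Timestamp, Message and Annotations from each event received on the driver's event channel. A minimal sketch of a driver emitting such an event follows; the drivers.TaskEvent type name, the ExampleDriver type and its eventCh field are assumptions used only for illustration.

```go
package exampledriver

import (
	"time"

	"github.com/hashicorp/nomad/plugins/drivers"
)

// ExampleDriver is a hypothetical driver; eventCh is the channel its
// TaskEvents implementation hands back to the Nomad client.
type ExampleDriver struct {
	eventCh chan *drivers.TaskEvent
}

// emitEvent pushes an event onto the channel; the gRPC server streams it to
// the client as a DriverTaskEvent message.
func (d *ExampleDriver) emitEvent(taskID, allocID, taskName, msg string) {
	d.eventCh <- &drivers.TaskEvent{
		TaskID:      taskID,
		AllocID:     allocID,
		TaskName:    taskName,
		Timestamp:   time.Now(),
		Message:     msg,
		Annotations: map[string]string{"source": "exampledriver"},
	}
}
```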
+ + // Annotations allows for additional key/value data to be sent along with the event + map annotations = 6; +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/server.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/server.go new file mode 100644 index 0000000..6de80d9 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/server.go @@ -0,0 +1,322 @@ +package drivers + +import ( + "fmt" + "io" + + "github.com/golang/protobuf/ptypes" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/drivers/proto" + dstructs "github.com/hashicorp/nomad/plugins/shared/structs" + sproto "github.com/hashicorp/nomad/plugins/shared/structs/proto" + context "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type driverPluginServer struct { + broker *plugin.GRPCBroker + impl DriverPlugin +} + +func (b *driverPluginServer) TaskConfigSchema(ctx context.Context, req *proto.TaskConfigSchemaRequest) (*proto.TaskConfigSchemaResponse, error) { + spec, err := b.impl.TaskConfigSchema() + if err != nil { + return nil, err + } + + resp := &proto.TaskConfigSchemaResponse{ + Spec: spec, + } + return resp, nil +} + +func (b *driverPluginServer) Capabilities(ctx context.Context, req *proto.CapabilitiesRequest) (*proto.CapabilitiesResponse, error) { + caps, err := b.impl.Capabilities() + if err != nil { + return nil, err + } + resp := &proto.CapabilitiesResponse{ + Capabilities: &proto.DriverCapabilities{ + SendSignals: caps.SendSignals, + Exec: caps.Exec, + }, + } + + switch caps.FSIsolation { + case FSIsolationNone: + resp.Capabilities.FsIsolation = proto.DriverCapabilities_NONE + case FSIsolationChroot: + resp.Capabilities.FsIsolation = proto.DriverCapabilities_CHROOT + case FSIsolationImage: + resp.Capabilities.FsIsolation = proto.DriverCapabilities_IMAGE + default: + resp.Capabilities.FsIsolation = proto.DriverCapabilities_NONE + } + return resp, nil +} + +func (b *driverPluginServer) Fingerprint(req *proto.FingerprintRequest, srv proto.Driver_FingerprintServer) error { + ctx := srv.Context() + ch, err := b.impl.Fingerprint(ctx) + if err != nil { + return err + } + + for { + select { + case <-ctx.Done(): + return nil + case f, ok := <-ch: + + if !ok { + return nil + } + resp := &proto.FingerprintResponse{ + Attributes: dstructs.ConvertStructAttributeMap(f.Attributes), + Health: healthStateToProto(f.Health), + HealthDescription: f.HealthDescription, + } + + if err := srv.Send(resp); err != nil { + return err + } + } + } +} + +func (b *driverPluginServer) RecoverTask(ctx context.Context, req *proto.RecoverTaskRequest) (*proto.RecoverTaskResponse, error) { + err := b.impl.RecoverTask(taskHandleFromProto(req.Handle)) + if err != nil { + return nil, err + } + + return &proto.RecoverTaskResponse{}, nil +} + +func (b *driverPluginServer) StartTask(ctx context.Context, req *proto.StartTaskRequest) (*proto.StartTaskResponse, error) { + handle, net, err := b.impl.StartTask(taskConfigFromProto(req.Task)) + if err != nil { + if rec, ok := err.(structs.Recoverable); ok { + st := status.New(codes.FailedPrecondition, rec.Error()) + st, err := st.WithDetails(&sproto.RecoverableError{Recoverable: rec.IsRecoverable()}) + if err != nil { + // If this error, it will always error + panic(err) + } + return nil, st.Err() + } + return nil, err + } + + var pbNet *proto.NetworkOverride + if net != nil { + pbNet = &proto.NetworkOverride{ + PortMap: map[string]int32{}, + Addr: net.IP, + 
AutoAdvertise: net.AutoAdvertise, + } + for k, v := range net.PortMap { + pbNet.PortMap[k] = int32(v) + } + } + + resp := &proto.StartTaskResponse{ + Handle: taskHandleToProto(handle), + NetworkOverride: pbNet, + } + + return resp, nil +} + +func (b *driverPluginServer) WaitTask(ctx context.Context, req *proto.WaitTaskRequest) (*proto.WaitTaskResponse, error) { + ch, err := b.impl.WaitTask(ctx, req.TaskId) + if err != nil { + return nil, err + } + + var ok bool + var result *ExitResult + select { + case <-ctx.Done(): + return nil, ctx.Err() + case result, ok = <-ch: + if !ok { + return &proto.WaitTaskResponse{ + Err: "channel closed", + }, nil + } + } + + var errStr string + if result.Err != nil { + errStr = result.Err.Error() + } + + resp := &proto.WaitTaskResponse{ + Err: errStr, + Result: &proto.ExitResult{ + ExitCode: int32(result.ExitCode), + Signal: int32(result.Signal), + OomKilled: result.OOMKilled, + }, + } + + return resp, nil +} + +func (b *driverPluginServer) StopTask(ctx context.Context, req *proto.StopTaskRequest) (*proto.StopTaskResponse, error) { + timeout, err := ptypes.Duration(req.Timeout) + if err != nil { + return nil, err + } + + err = b.impl.StopTask(req.TaskId, timeout, req.Signal) + if err != nil { + return nil, err + } + return &proto.StopTaskResponse{}, nil +} + +func (b *driverPluginServer) DestroyTask(ctx context.Context, req *proto.DestroyTaskRequest) (*proto.DestroyTaskResponse, error) { + err := b.impl.DestroyTask(req.TaskId, req.Force) + if err != nil { + return nil, err + } + return &proto.DestroyTaskResponse{}, nil +} + +func (b *driverPluginServer) InspectTask(ctx context.Context, req *proto.InspectTaskRequest) (*proto.InspectTaskResponse, error) { + status, err := b.impl.InspectTask(req.TaskId) + if err != nil { + return nil, err + } + + protoStatus, err := taskStatusToProto(status) + if err != nil { + return nil, err + } + + var pbNet *proto.NetworkOverride + if status.NetworkOverride != nil { + pbNet = &proto.NetworkOverride{ + PortMap: map[string]int32{}, + Addr: status.NetworkOverride.IP, + AutoAdvertise: status.NetworkOverride.AutoAdvertise, + } + for k, v := range status.NetworkOverride.PortMap { + pbNet.PortMap[k] = int32(v) + } + } + + resp := &proto.InspectTaskResponse{ + Task: protoStatus, + Driver: &proto.TaskDriverStatus{ + Attributes: status.DriverAttributes, + }, + NetworkOverride: pbNet, + } + + return resp, nil +} + +func (b *driverPluginServer) TaskStats(req *proto.TaskStatsRequest, srv proto.Driver_TaskStatsServer) error { + interval, err := ptypes.Duration(req.CollectionInterval) + if err != nil { + return fmt.Errorf("failed to parse collection interval: %v", err) + } + + ch, err := b.impl.TaskStats(srv.Context(), req.TaskId, interval) + if err != nil { + if rec, ok := err.(structs.Recoverable); ok { + st := status.New(codes.FailedPrecondition, rec.Error()) + st, err := st.WithDetails(&sproto.RecoverableError{Recoverable: rec.IsRecoverable()}) + if err != nil { + // If this error, it will always error + panic(err) + } + return st.Err() + } + return err + } + + for stats := range ch { + pb, err := TaskStatsToProto(stats) + if err != nil { + return fmt.Errorf("failed to encode task stats: %v", err) + } + + if err = srv.Send(&proto.TaskStatsResponse{Stats: pb}); err == io.EOF { + break + } else if err != nil { + return err + } + + } + + return nil +} + +func (b *driverPluginServer) ExecTask(ctx context.Context, req *proto.ExecTaskRequest) (*proto.ExecTaskResponse, error) { + timeout, err := ptypes.Duration(req.Timeout) + if err != nil { 
+ return nil, err + } + + result, err := b.impl.ExecTask(req.TaskId, req.Command, timeout) + if err != nil { + return nil, err + } + resp := &proto.ExecTaskResponse{ + Stdout: result.Stdout, + Stderr: result.Stderr, + Result: exitResultToProto(result.ExitResult), + } + + return resp, nil +} + +func (b *driverPluginServer) SignalTask(ctx context.Context, req *proto.SignalTaskRequest) (*proto.SignalTaskResponse, error) { + err := b.impl.SignalTask(req.TaskId, req.Signal) + if err != nil { + return nil, err + } + + resp := &proto.SignalTaskResponse{} + return resp, nil +} + +func (b *driverPluginServer) TaskEvents(req *proto.TaskEventsRequest, srv proto.Driver_TaskEventsServer) error { + ch, err := b.impl.TaskEvents(srv.Context()) + if err != nil { + return err + } + + for { + event := <-ch + if event == nil { + break + } + pbTimestamp, err := ptypes.TimestampProto(event.Timestamp) + if err != nil { + return err + } + + pbEvent := &proto.DriverTaskEvent{ + TaskId: event.TaskID, + AllocId: event.AllocID, + TaskName: event.TaskName, + Timestamp: pbTimestamp, + Message: event.Message, + Annotations: event.Annotations, + } + + if err = srv.Send(pbEvent); err == io.EOF { + break + } else if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/task_handle.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/task_handle.go new file mode 100644 index 0000000..0f9811a --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/task_handle.go @@ -0,0 +1,47 @@ +package drivers + +import ( + "github.com/hashicorp/nomad/plugins/base" +) + +// TaskHandle is the state shared between a driver and the client. +// It is returned to the client after starting the task and used +// for recovery of tasks during a driver restart. +type TaskHandle struct { + // Version is set by the driver an allows it to handle upgrading from + // an older DriverState struct. Prior to 0.9 the only state stored for + // driver was the reattach config for the executor. To allow upgrading to + // 0.9, Version 0 is handled as if it is the json encoded reattach config. 
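Together with the SetDriverState and GetDriverState helpers defined further down in this file, the handle gives a driver a straightforward way to persist its private state when a task starts and to recover it after a plugin restart. A minimal sketch under those assumptions; taskState, exampleHandleVersion and ExampleDriver are hypothetical names used only for illustration.

```go
package exampledriver

import "github.com/hashicorp/nomad/plugins/drivers"

// ExampleDriver is a hypothetical driver type used only for illustration.
type ExampleDriver struct{}

// taskState is the private state this driver round-trips through the handle.
type taskState struct {
	Pid       int
	StartedAt int64
}

// exampleHandleVersion is 1 because version 0 is reserved by Nomad.
const exampleHandleVersion = 1

// newHandle builds a handle for a freshly started task and stores the
// driver's private state inside it.
func newHandle(cfg *drivers.TaskConfig, pid int, startedAt int64) (*drivers.TaskHandle, error) {
	h := drivers.NewTaskHandle(exampleHandleVersion)
	h.Config = cfg
	h.State = drivers.TaskStateRunning
	if err := h.SetDriverState(&taskState{Pid: pid, StartedAt: startedAt}); err != nil {
		return nil, err
	}
	return h, nil
}

// RecoverTask decodes the persisted state back out of the handle so the
// driver can reattach to the task (reattach details omitted).
func (d *ExampleDriver) RecoverTask(h *drivers.TaskHandle) error {
	var st taskState
	if err := h.GetDriverState(&st); err != nil {
		return err
	}
	_ = st.Pid // reattach to the process identified by st.Pid
	return nil
}
```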
+ Version int + Config *TaskConfig + State TaskState + DriverState []byte +} + +func NewTaskHandle(version int) *TaskHandle { + return &TaskHandle{Version: version} +} + +func (h *TaskHandle) SetDriverState(v interface{}) error { + h.DriverState = []byte{} + return base.MsgPackEncode(&h.DriverState, v) +} + +func (h *TaskHandle) GetDriverState(v interface{}) error { + return base.MsgPackDecode(h.DriverState, v) + +} + +func (h *TaskHandle) Copy() *TaskHandle { + if h == nil { + return nil + } + + handle := new(TaskHandle) + handle.Version = h.Version + handle.Config = h.Config.Copy() + handle.State = h.State + handle.DriverState = make([]byte, len(h.DriverState)) + copy(handle.DriverState, h.DriverState) + return handle +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/utils.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/utils.go new file mode 100644 index 0000000..f7b7a51 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/utils.go @@ -0,0 +1,573 @@ +package drivers + +import ( + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/drivers/proto" +) + +var taskStateToProtoMap = map[TaskState]proto.TaskState{ + TaskStateUnknown: proto.TaskState_UNKNOWN, + TaskStateRunning: proto.TaskState_RUNNING, + TaskStateExited: proto.TaskState_EXITED, +} + +var taskStateFromProtoMap = map[proto.TaskState]TaskState{ + proto.TaskState_UNKNOWN: TaskStateUnknown, + proto.TaskState_RUNNING: TaskStateRunning, + proto.TaskState_EXITED: TaskStateExited, +} + +func healthStateToProto(health HealthState) proto.FingerprintResponse_HealthState { + switch health { + case HealthStateUndetected: + return proto.FingerprintResponse_UNDETECTED + case HealthStateUnhealthy: + return proto.FingerprintResponse_UNHEALTHY + case HealthStateHealthy: + return proto.FingerprintResponse_HEALTHY + } + return proto.FingerprintResponse_UNDETECTED +} + +func healthStateFromProto(pb proto.FingerprintResponse_HealthState) HealthState { + switch pb { + case proto.FingerprintResponse_UNDETECTED: + return HealthStateUndetected + case proto.FingerprintResponse_UNHEALTHY: + return HealthStateUnhealthy + case proto.FingerprintResponse_HEALTHY: + return HealthStateHealthy + } + return HealthStateUndetected +} + +func taskConfigFromProto(pb *proto.TaskConfig) *TaskConfig { + if pb == nil { + return &TaskConfig{} + } + return &TaskConfig{ + ID: pb.Id, + JobName: pb.JobName, + TaskGroupName: pb.TaskGroupName, + Name: pb.Name, + Env: pb.Env, + DeviceEnv: pb.DeviceEnv, + rawDriverConfig: pb.MsgpackDriverConfig, + Resources: ResourcesFromProto(pb.Resources), + Devices: DevicesFromProto(pb.Devices), + Mounts: MountsFromProto(pb.Mounts), + User: pb.User, + AllocDir: pb.AllocDir, + StdoutPath: pb.StdoutPath, + StderrPath: pb.StderrPath, + AllocID: pb.AllocId, + } +} + +func taskConfigToProto(cfg *TaskConfig) *proto.TaskConfig { + if cfg == nil { + return &proto.TaskConfig{} + } + pb := &proto.TaskConfig{ + Id: cfg.ID, + JobName: cfg.JobName, + TaskGroupName: cfg.TaskGroupName, + Name: cfg.Name, + Env: cfg.Env, + DeviceEnv: cfg.DeviceEnv, + Resources: ResourcesToProto(cfg.Resources), + Devices: DevicesToProto(cfg.Devices), + Mounts: MountsToProto(cfg.Mounts), + User: cfg.User, + AllocDir: cfg.AllocDir, + MsgpackDriverConfig: cfg.rawDriverConfig, + StdoutPath: cfg.StdoutPath, + StderrPath: cfg.StderrPath, + AllocId: cfg.AllocID, + } + return pb +} + +func ResourcesFromProto(pb *proto.Resources) *Resources { + var r Resources + if pb == 
nil { + return &r + } + + if pb.AllocatedResources != nil { + r.NomadResources = &structs.AllocatedTaskResources{} + + if pb.AllocatedResources.Cpu != nil { + r.NomadResources.Cpu.CpuShares = pb.AllocatedResources.Cpu.CpuShares + } + + if pb.AllocatedResources.Memory != nil { + r.NomadResources.Memory.MemoryMB = pb.AllocatedResources.Memory.MemoryMb + } + + for _, network := range pb.AllocatedResources.Networks { + var n structs.NetworkResource + n.Device = network.Device + n.IP = network.Ip + n.CIDR = network.Cidr + n.MBits = int(network.Mbits) + for _, port := range network.ReservedPorts { + n.ReservedPorts = append(n.ReservedPorts, structs.Port{ + Label: port.Label, + Value: int(port.Value), + }) + } + for _, port := range network.DynamicPorts { + n.DynamicPorts = append(n.DynamicPorts, structs.Port{ + Label: port.Label, + Value: int(port.Value), + }) + } + r.NomadResources.Networks = append(r.NomadResources.Networks, &n) + } + } + + if pb.LinuxResources != nil { + r.LinuxResources = &LinuxResources{ + CPUPeriod: pb.LinuxResources.CpuPeriod, + CPUQuota: pb.LinuxResources.CpuQuota, + CPUShares: pb.LinuxResources.CpuShares, + MemoryLimitBytes: pb.LinuxResources.MemoryLimitBytes, + OOMScoreAdj: pb.LinuxResources.OomScoreAdj, + CpusetCPUs: pb.LinuxResources.CpusetCpus, + CpusetMems: pb.LinuxResources.CpusetMems, + PercentTicks: pb.LinuxResources.PercentTicks, + } + } + + return &r +} + +func ResourcesToProto(r *Resources) *proto.Resources { + if r == nil { + return nil + } + + var pb proto.Resources + if r.NomadResources != nil { + pb.AllocatedResources = &proto.AllocatedTaskResources{ + Cpu: &proto.AllocatedCpuResources{ + CpuShares: r.NomadResources.Cpu.CpuShares, + }, + Memory: &proto.AllocatedMemoryResources{ + MemoryMb: r.NomadResources.Memory.MemoryMB, + }, + Networks: make([]*proto.NetworkResource, len(r.NomadResources.Networks)), + } + + for i, network := range r.NomadResources.Networks { + var n proto.NetworkResource + n.Device = network.Device + n.Ip = network.IP + n.Cidr = network.CIDR + n.Mbits = int32(network.MBits) + n.ReservedPorts = []*proto.NetworkPort{} + for _, port := range network.ReservedPorts { + n.ReservedPorts = append(n.ReservedPorts, &proto.NetworkPort{ + Label: port.Label, + Value: int32(port.Value), + }) + } + for _, port := range network.DynamicPorts { + n.DynamicPorts = append(n.DynamicPorts, &proto.NetworkPort{ + Label: port.Label, + Value: int32(port.Value), + }) + } + pb.AllocatedResources.Networks[i] = &n + } + } + + if r.LinuxResources != nil { + pb.LinuxResources = &proto.LinuxResources{ + CpuPeriod: r.LinuxResources.CPUPeriod, + CpuQuota: r.LinuxResources.CPUQuota, + CpuShares: r.LinuxResources.CPUShares, + MemoryLimitBytes: r.LinuxResources.MemoryLimitBytes, + OomScoreAdj: r.LinuxResources.OOMScoreAdj, + CpusetCpus: r.LinuxResources.CpusetCPUs, + CpusetMems: r.LinuxResources.CpusetMems, + PercentTicks: r.LinuxResources.PercentTicks, + } + } + + return &pb +} + +func DevicesFromProto(devices []*proto.Device) []*DeviceConfig { + if devices == nil { + return nil + } + + out := make([]*DeviceConfig, len(devices)) + for i, d := range devices { + out[i] = DeviceFromProto(d) + } + + return out +} + +func DeviceFromProto(device *proto.Device) *DeviceConfig { + if device == nil { + return nil + } + + return &DeviceConfig{ + TaskPath: device.TaskPath, + HostPath: device.HostPath, + Permissions: device.CgroupPermissions, + } +} + +func MountsFromProto(mounts []*proto.Mount) []*MountConfig { + if mounts == nil { + return nil + } + + out := make([]*MountConfig, 
len(mounts)) + for i, m := range mounts { + out[i] = MountFromProto(m) + } + + return out +} + +func MountFromProto(mount *proto.Mount) *MountConfig { + if mount == nil { + return nil + } + + return &MountConfig{ + TaskPath: mount.TaskPath, + HostPath: mount.HostPath, + Readonly: mount.Readonly, + } +} + +func DevicesToProto(devices []*DeviceConfig) []*proto.Device { + if devices == nil { + return nil + } + + out := make([]*proto.Device, len(devices)) + for i, d := range devices { + out[i] = DeviceToProto(d) + } + + return out +} + +func DeviceToProto(device *DeviceConfig) *proto.Device { + if device == nil { + return nil + } + + return &proto.Device{ + TaskPath: device.TaskPath, + HostPath: device.HostPath, + CgroupPermissions: device.Permissions, + } +} + +func MountsToProto(mounts []*MountConfig) []*proto.Mount { + if mounts == nil { + return nil + } + + out := make([]*proto.Mount, len(mounts)) + for i, m := range mounts { + out[i] = MountToProto(m) + } + + return out +} + +func MountToProto(mount *MountConfig) *proto.Mount { + if mount == nil { + return nil + } + + return &proto.Mount{ + TaskPath: mount.TaskPath, + HostPath: mount.HostPath, + Readonly: mount.Readonly, + } +} + +func taskHandleFromProto(pb *proto.TaskHandle) *TaskHandle { + if pb == nil { + return &TaskHandle{} + } + return &TaskHandle{ + Version: int(pb.Version), + Config: taskConfigFromProto(pb.Config), + State: taskStateFromProtoMap[pb.State], + DriverState: pb.DriverState, + } +} + +func taskHandleToProto(handle *TaskHandle) *proto.TaskHandle { + return &proto.TaskHandle{ + Version: int32(handle.Version), + Config: taskConfigToProto(handle.Config), + State: taskStateToProtoMap[handle.State], + DriverState: handle.DriverState, + } +} + +func exitResultToProto(result *ExitResult) *proto.ExitResult { + if result == nil { + return &proto.ExitResult{} + } + return &proto.ExitResult{ + ExitCode: int32(result.ExitCode), + Signal: int32(result.Signal), + OomKilled: result.OOMKilled, + } +} + +func exitResultFromProto(pb *proto.ExitResult) *ExitResult { + return &ExitResult{ + ExitCode: int(pb.ExitCode), + Signal: int(pb.Signal), + OOMKilled: pb.OomKilled, + } +} + +func taskStatusToProto(status *TaskStatus) (*proto.TaskStatus, error) { + started, err := ptypes.TimestampProto(status.StartedAt) + if err != nil { + return nil, err + } + completed, err := ptypes.TimestampProto(status.CompletedAt) + if err != nil { + return nil, err + } + return &proto.TaskStatus{ + Id: status.ID, + Name: status.Name, + State: taskStateToProtoMap[status.State], + StartedAt: started, + CompletedAt: completed, + Result: exitResultToProto(status.ExitResult), + }, nil +} + +func taskStatusFromProto(pb *proto.TaskStatus) (*TaskStatus, error) { + started, err := ptypes.Timestamp(pb.StartedAt) + if err != nil { + return nil, err + } + + completed, err := ptypes.Timestamp(pb.CompletedAt) + if err != nil { + return nil, err + } + + return &TaskStatus{ + ID: pb.Id, + Name: pb.Name, + State: taskStateFromProtoMap[pb.State], + StartedAt: started, + CompletedAt: completed, + ExitResult: exitResultFromProto(pb.Result), + }, nil +} + +func TaskStatsToProto(stats *TaskResourceUsage) (*proto.TaskStats, error) { + timestamp, err := ptypes.TimestampProto(time.Unix(0, stats.Timestamp)) + if err != nil { + return nil, err + } + + pids := map[string]*proto.TaskResourceUsage{} + for pid, ru := range stats.Pids { + pids[pid] = resourceUsageToProto(ru) + } + + return &proto.TaskStats{ + Timestamp: timestamp, + AggResourceUsage: resourceUsageToProto(stats.ResourceUsage), 
+ ResourceUsageByPid: pids, + }, nil +} + +func TaskStatsFromProto(pb *proto.TaskStats) (*TaskResourceUsage, error) { + timestamp, err := ptypes.Timestamp(pb.Timestamp) + if err != nil { + return nil, err + } + + pids := map[string]*ResourceUsage{} + for pid, ru := range pb.ResourceUsageByPid { + pids[pid] = resourceUsageFromProto(ru) + } + + stats := &TaskResourceUsage{ + Timestamp: timestamp.UnixNano(), + ResourceUsage: resourceUsageFromProto(pb.AggResourceUsage), + Pids: pids, + } + + return stats, nil +} + +func resourceUsageToProto(ru *ResourceUsage) *proto.TaskResourceUsage { + cpu := &proto.CPUUsage{ + MeasuredFields: cpuUsageMeasuredFieldsToProto(ru.CpuStats.Measured), + SystemMode: ru.CpuStats.SystemMode, + UserMode: ru.CpuStats.UserMode, + TotalTicks: ru.CpuStats.TotalTicks, + ThrottledPeriods: ru.CpuStats.ThrottledPeriods, + ThrottledTime: ru.CpuStats.ThrottledTime, + Percent: ru.CpuStats.Percent, + } + + memory := &proto.MemoryUsage{ + MeasuredFields: memoryUsageMeasuredFieldsToProto(ru.MemoryStats.Measured), + Rss: ru.MemoryStats.RSS, + Cache: ru.MemoryStats.Cache, + Swap: ru.MemoryStats.Swap, + Usage: ru.MemoryStats.Usage, + MaxUsage: ru.MemoryStats.MaxUsage, + KernelUsage: ru.MemoryStats.KernelUsage, + KernelMaxUsage: ru.MemoryStats.KernelMaxUsage, + } + + return &proto.TaskResourceUsage{ + Cpu: cpu, + Memory: memory, + } +} + +func resourceUsageFromProto(pb *proto.TaskResourceUsage) *ResourceUsage { + cpu := CpuStats{} + if pb.Cpu != nil { + cpu = CpuStats{ + Measured: cpuUsageMeasuredFieldsFromProto(pb.Cpu.MeasuredFields), + SystemMode: pb.Cpu.SystemMode, + UserMode: pb.Cpu.UserMode, + TotalTicks: pb.Cpu.TotalTicks, + ThrottledPeriods: pb.Cpu.ThrottledPeriods, + ThrottledTime: pb.Cpu.ThrottledTime, + Percent: pb.Cpu.Percent, + } + } + + memory := MemoryStats{} + if pb.Memory != nil { + memory = MemoryStats{ + Measured: memoryUsageMeasuredFieldsFromProto(pb.Memory.MeasuredFields), + RSS: pb.Memory.Rss, + Cache: pb.Memory.Cache, + Swap: pb.Memory.Swap, + Usage: pb.Memory.Usage, + MaxUsage: pb.Memory.MaxUsage, + KernelUsage: pb.Memory.KernelUsage, + KernelMaxUsage: pb.Memory.KernelMaxUsage, + } + } + + return &ResourceUsage{ + CpuStats: &cpu, + MemoryStats: &memory, + } +} + +func BytesToMB(bytes int64) int64 { + return bytes / (1024 * 1024) +} + +var cpuUsageMeasuredFieldToProtoMap = map[string]proto.CPUUsage_Fields{ + "System Mode": proto.CPUUsage_SYSTEM_MODE, + "User Mode": proto.CPUUsage_USER_MODE, + "Total Ticks": proto.CPUUsage_TOTAL_TICKS, + "Throttled Periods": proto.CPUUsage_THROTTLED_PERIODS, + "Throttled Time": proto.CPUUsage_THROTTLED_TIME, + "Percent": proto.CPUUsage_PERCENT, +} + +var cpuUsageMeasuredFieldFromProtoMap = map[proto.CPUUsage_Fields]string{ + proto.CPUUsage_SYSTEM_MODE: "System Mode", + proto.CPUUsage_USER_MODE: "User Mode", + proto.CPUUsage_TOTAL_TICKS: "Total Ticks", + proto.CPUUsage_THROTTLED_PERIODS: "Throttled Periods", + proto.CPUUsage_THROTTLED_TIME: "Throttled Time", + proto.CPUUsage_PERCENT: "Percent", +} + +func cpuUsageMeasuredFieldsToProto(fields []string) []proto.CPUUsage_Fields { + r := make([]proto.CPUUsage_Fields, 0, len(fields)) + + for _, f := range fields { + if v, ok := cpuUsageMeasuredFieldToProtoMap[f]; ok { + r = append(r, v) + } + } + + return r +} + +func cpuUsageMeasuredFieldsFromProto(fields []proto.CPUUsage_Fields) []string { + r := make([]string, 0, len(fields)) + + for _, f := range fields { + if v, ok := cpuUsageMeasuredFieldFromProtoMap[f]; ok { + r = append(r, v) + } + } + + return r +} + +var 
memoryUsageMeasuredFieldToProtoMap = map[string]proto.MemoryUsage_Fields{ + "RSS": proto.MemoryUsage_RSS, + "Cache": proto.MemoryUsage_CACHE, + "Swap": proto.MemoryUsage_SWAP, + "Usage": proto.MemoryUsage_USAGE, + "Max Usage": proto.MemoryUsage_MAX_USAGE, + "Kernel Usage": proto.MemoryUsage_KERNEL_USAGE, + "Kernel Max Usage": proto.MemoryUsage_KERNEL_MAX_USAGE, +} + +var memoryUsageMeasuredFieldFromProtoMap = map[proto.MemoryUsage_Fields]string{ + proto.MemoryUsage_RSS: "RSS", + proto.MemoryUsage_CACHE: "Cache", + proto.MemoryUsage_SWAP: "Swap", + proto.MemoryUsage_USAGE: "Usage", + proto.MemoryUsage_MAX_USAGE: "Max Usage", + proto.MemoryUsage_KERNEL_USAGE: "Kernel Usage", + proto.MemoryUsage_KERNEL_MAX_USAGE: "Kernel Max Usage", +} + +func memoryUsageMeasuredFieldsToProto(fields []string) []proto.MemoryUsage_Fields { + r := make([]proto.MemoryUsage_Fields, 0, len(fields)) + + for _, f := range fields { + if v, ok := memoryUsageMeasuredFieldToProtoMap[f]; ok { + r = append(r, v) + } + } + + return r +} + +func memoryUsageMeasuredFieldsFromProto(fields []proto.MemoryUsage_Fields) []string { + r := make([]string, 0, len(fields)) + + for _, f := range fields { + if v, ok := memoryUsageMeasuredFieldFromProtoMap[f]; ok { + r = append(r, v) + } + } + + return r +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/drivers/versions.go b/vendor/github.com/hashicorp/nomad/plugins/drivers/versions.go new file mode 100644 index 0000000..4b94d90 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/drivers/versions.go @@ -0,0 +1,6 @@ +package drivers + +const ( + // ApiVersion010 is the initial API version for the device plugins + ApiVersion010 = "v0.1.0" +) diff --git a/vendor/github.com/hashicorp/nomad/plugins/serve.go b/vendor/github.com/hashicorp/nomad/plugins/serve.go new file mode 100644 index 0000000..f317f52 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/serve.go @@ -0,0 +1,30 @@ +package plugins + +import ( + "fmt" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/plugins/device" + "github.com/hashicorp/nomad/plugins/drivers" +) + +// PluginFactory returns a new plugin instance +type PluginFactory func(log log.Logger) interface{} + +// Serve is used to serve a new Nomad plugin +func Serve(f PluginFactory) { + logger := log.New(&log.LoggerOptions{ + Level: log.Trace, + JSONFormat: true, + }) + + plugin := f(logger) + switch p := plugin.(type) { + case device.DevicePlugin: + device.Serve(p, logger) + case drivers.DriverPlugin: + drivers.Serve(p, logger) + default: + fmt.Println("Unsupported plugin type") + } +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/hcl_spec.pb.go b/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/hcl_spec.pb.go new file mode 100644 index 0000000..6bbc916 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/hcl_spec.pb.go @@ -0,0 +1,1328 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/shared/hclspec/hcl_spec.proto + +package hclspec + +/* +Spec allows exposing the specification for an HCL body, allowing for parsing and +validation. + +Certain expressions within a specification may use the following functions. +The documentation for each spec type above specifies where functions may +be used. + +`abs(number)` returns the absolute (positive) value of the given number. +`coalesce(vals...)` returns the first non-null value given. +`concat(lists...)` concatenates together all of the given lists to produce a new list. 
+`hasindex(val, idx)` returns true if the expression `val[idx]` could succeed. +`int(number)` returns the integer portion of the given number, rounding towards zero. +`jsondecode(str)` interprets the given string as JSON and returns the resulting data structure. +`jsonencode(val)` returns a JSON-serialized version of the given value. +`length(collection)` returns the number of elements in the given collection (list, set, map, object, or tuple). +`lower(string)` returns the given string with all uppercase letters converted to lowercase. +`max(numbers...)` returns the greatest of the given numbers. +`min(numbers...)` returns the smallest of the given numbers. +`reverse(string)` returns the given string with all of the characters in reverse order. +`strlen(string)` returns the number of characters in the given string. +`substr(string, offset, length)` returns the requested substring of the given string. +`upper(string)` returns the given string with all lowercase letters converted to uppercase. + +## Type Expressions + +Type expressions are used to describe the expected type of an attribute, as +an additional validation constraint. + +A type expression uses primitive type names and compound type constructors. +A type constructor builds a new type based on one or more type expression +arguments. + +The following type names and type constructors are supported: + +`any` is a wildcard that accepts a value of any type. (In HCL terms, this +is the _dynamic pseudo-type_.) +`string` is a Unicode string. +`number` is an arbitrary-precision floating point number. +`bool` is a boolean value (`true` or `false`) +`list(element_type)` constructs a list type with the given element type +`set(element_type)` constructs a set type with the given element type +`map(element_type)` constructs a map type with the given element type +`object({name1 = element_type, name2 = element_type, ...})` constructs +an object type with the given attribute types. +`tuple([element_type, element_type, ...])` constructs a tuple type with +the given element types. This can be used, for example, to require an +array with a particular number of elements, or with elements of different +types. + +`null` is a valid value of any type, and not a type itself. +*/ + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Spec defines the available specification types. 
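Before the generated definitions themselves, a quick illustration of how these types compose. The sketch below builds the `document_root` attribute from the `Attr` documentation later in this file directly out of the generated message types; drivers typically use helper constructors that accompany this package rather than writing the literals by hand, but those helpers are not part of this excerpt, so only the generated types are used here.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/plugins/shared/hclspec"
)

func main() {
	// Equivalent of the HCL spec:
	//   Attr {
	//     name     = "document_root"
	//     type     = string
	//     required = true
	//   }
	spec := &hclspec.Spec{
		Block: &hclspec.Spec_Attr{
			Attr: &hclspec.Attr{
				Name:     "document_root",
				Type:     "string",
				Required: true,
			},
		},
	}
	fmt.Println(spec.String())
}
```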
+type Spec struct { + // Types that are valid to be assigned to Block: + // *Spec_Object + // *Spec_Array + // *Spec_Attr + // *Spec_BlockValue + // *Spec_BlockAttrs + // *Spec_BlockList + // *Spec_BlockSet + // *Spec_BlockMap + // *Spec_Default + // *Spec_Literal + Block isSpec_Block `protobuf_oneof:"block"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Spec) Reset() { *m = Spec{} } +func (m *Spec) String() string { return proto.CompactTextString(m) } +func (*Spec) ProtoMessage() {} +func (*Spec) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{0} +} +func (m *Spec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Spec.Unmarshal(m, b) +} +func (m *Spec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Spec.Marshal(b, m, deterministic) +} +func (dst *Spec) XXX_Merge(src proto.Message) { + xxx_messageInfo_Spec.Merge(dst, src) +} +func (m *Spec) XXX_Size() int { + return xxx_messageInfo_Spec.Size(m) +} +func (m *Spec) XXX_DiscardUnknown() { + xxx_messageInfo_Spec.DiscardUnknown(m) +} + +var xxx_messageInfo_Spec proto.InternalMessageInfo + +type isSpec_Block interface { + isSpec_Block() +} + +type Spec_Object struct { + Object *Object `protobuf:"bytes,1,opt,name=object,proto3,oneof"` +} + +type Spec_Array struct { + Array *Array `protobuf:"bytes,2,opt,name=array,proto3,oneof"` +} + +type Spec_Attr struct { + Attr *Attr `protobuf:"bytes,3,opt,name=Attr,proto3,oneof"` +} + +type Spec_BlockValue struct { + BlockValue *Block `protobuf:"bytes,4,opt,name=block_value,json=blockValue,proto3,oneof"` +} + +type Spec_BlockAttrs struct { + BlockAttrs *BlockAttrs `protobuf:"bytes,5,opt,name=block_attrs,json=blockAttrs,proto3,oneof"` +} + +type Spec_BlockList struct { + BlockList *BlockList `protobuf:"bytes,6,opt,name=block_list,json=blockList,proto3,oneof"` +} + +type Spec_BlockSet struct { + BlockSet *BlockSet `protobuf:"bytes,7,opt,name=block_set,json=blockSet,proto3,oneof"` +} + +type Spec_BlockMap struct { + BlockMap *BlockMap `protobuf:"bytes,8,opt,name=block_map,json=blockMap,proto3,oneof"` +} + +type Spec_Default struct { + Default *Default `protobuf:"bytes,9,opt,name=default,proto3,oneof"` +} + +type Spec_Literal struct { + Literal *Literal `protobuf:"bytes,10,opt,name=literal,proto3,oneof"` +} + +func (*Spec_Object) isSpec_Block() {} + +func (*Spec_Array) isSpec_Block() {} + +func (*Spec_Attr) isSpec_Block() {} + +func (*Spec_BlockValue) isSpec_Block() {} + +func (*Spec_BlockAttrs) isSpec_Block() {} + +func (*Spec_BlockList) isSpec_Block() {} + +func (*Spec_BlockSet) isSpec_Block() {} + +func (*Spec_BlockMap) isSpec_Block() {} + +func (*Spec_Default) isSpec_Block() {} + +func (*Spec_Literal) isSpec_Block() {} + +func (m *Spec) GetBlock() isSpec_Block { + if m != nil { + return m.Block + } + return nil +} + +func (m *Spec) GetObject() *Object { + if x, ok := m.GetBlock().(*Spec_Object); ok { + return x.Object + } + return nil +} + +func (m *Spec) GetArray() *Array { + if x, ok := m.GetBlock().(*Spec_Array); ok { + return x.Array + } + return nil +} + +func (m *Spec) GetAttr() *Attr { + if x, ok := m.GetBlock().(*Spec_Attr); ok { + return x.Attr + } + return nil +} + +func (m *Spec) GetBlockValue() *Block { + if x, ok := m.GetBlock().(*Spec_BlockValue); ok { + return x.BlockValue + } + return nil +} + +func (m *Spec) GetBlockAttrs() *BlockAttrs { + if x, ok := m.GetBlock().(*Spec_BlockAttrs); ok { + return x.BlockAttrs + } + return nil 
+} + +func (m *Spec) GetBlockList() *BlockList { + if x, ok := m.GetBlock().(*Spec_BlockList); ok { + return x.BlockList + } + return nil +} + +func (m *Spec) GetBlockSet() *BlockSet { + if x, ok := m.GetBlock().(*Spec_BlockSet); ok { + return x.BlockSet + } + return nil +} + +func (m *Spec) GetBlockMap() *BlockMap { + if x, ok := m.GetBlock().(*Spec_BlockMap); ok { + return x.BlockMap + } + return nil +} + +func (m *Spec) GetDefault() *Default { + if x, ok := m.GetBlock().(*Spec_Default); ok { + return x.Default + } + return nil +} + +func (m *Spec) GetLiteral() *Literal { + if x, ok := m.GetBlock().(*Spec_Literal); ok { + return x.Literal + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Spec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Spec_OneofMarshaler, _Spec_OneofUnmarshaler, _Spec_OneofSizer, []interface{}{ + (*Spec_Object)(nil), + (*Spec_Array)(nil), + (*Spec_Attr)(nil), + (*Spec_BlockValue)(nil), + (*Spec_BlockAttrs)(nil), + (*Spec_BlockList)(nil), + (*Spec_BlockSet)(nil), + (*Spec_BlockMap)(nil), + (*Spec_Default)(nil), + (*Spec_Literal)(nil), + } +} + +func _Spec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Spec) + // block + switch x := m.Block.(type) { + case *Spec_Object: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Object); err != nil { + return err + } + case *Spec_Array: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Array); err != nil { + return err + } + case *Spec_Attr: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Attr); err != nil { + return err + } + case *Spec_BlockValue: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BlockValue); err != nil { + return err + } + case *Spec_BlockAttrs: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BlockAttrs); err != nil { + return err + } + case *Spec_BlockList: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BlockList); err != nil { + return err + } + case *Spec_BlockSet: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BlockSet); err != nil { + return err + } + case *Spec_BlockMap: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BlockMap); err != nil { + return err + } + case *Spec_Default: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Default); err != nil { + return err + } + case *Spec_Literal: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Literal); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Spec.Block has unexpected type %T", x) + } + return nil +} + +func _Spec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Spec) + switch tag { + case 1: // block.object + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Object) + err := b.DecodeMessage(msg) + m.Block = &Spec_Object{msg} + return true, err + case 2: // block.array + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Array) + err := b.DecodeMessage(msg) + m.Block = &Spec_Array{msg} + return true, err + case 3: // block.Attr + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Attr) + err := b.DecodeMessage(msg) + m.Block = 
&Spec_Attr{msg} + return true, err + case 4: // block.block_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Block) + err := b.DecodeMessage(msg) + m.Block = &Spec_BlockValue{msg} + return true, err + case 5: // block.block_attrs + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BlockAttrs) + err := b.DecodeMessage(msg) + m.Block = &Spec_BlockAttrs{msg} + return true, err + case 6: // block.block_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BlockList) + err := b.DecodeMessage(msg) + m.Block = &Spec_BlockList{msg} + return true, err + case 7: // block.block_set + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BlockSet) + err := b.DecodeMessage(msg) + m.Block = &Spec_BlockSet{msg} + return true, err + case 8: // block.block_map + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BlockMap) + err := b.DecodeMessage(msg) + m.Block = &Spec_BlockMap{msg} + return true, err + case 9: // block.default + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Default) + err := b.DecodeMessage(msg) + m.Block = &Spec_Default{msg} + return true, err + case 10: // block.literal + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Literal) + err := b.DecodeMessage(msg) + m.Block = &Spec_Literal{msg} + return true, err + default: + return false, nil + } +} + +func _Spec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Spec) + // block + switch x := m.Block.(type) { + case *Spec_Object: + s := proto.Size(x.Object) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_Array: + s := proto.Size(x.Array) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_Attr: + s := proto.Size(x.Attr) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_BlockValue: + s := proto.Size(x.BlockValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_BlockAttrs: + s := proto.Size(x.BlockAttrs) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_BlockList: + s := proto.Size(x.BlockList) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_BlockSet: + s := proto.Size(x.BlockSet) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_BlockMap: + s := proto.Size(x.BlockMap) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_Default: + s := proto.Size(x.Default) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Spec_Literal: + s := proto.Size(x.Literal) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Attr spec type reads the value of an attribute in the current body +// and returns that value as its result. It also creates validation constraints +// for the given attribute name and its value. +// +// ```hcl +// Attr { +// name = "document_root" +// type = string +// required = true +// } +// ``` +// +// `Attr` spec blocks accept the following arguments: +// +// `name` (required) - The attribute name to expect within the HCL input file. 
+// This may be omitted when a default name selector is created by a parent +// `Object` spec, if the input attribute name should match the output JSON +// object property name. +// +// `type` (optional) - A [type expression](#type-expressions) that the given +// attribute value must conform to. If this argument is set, `hcldec` will +// automatically convert the given input value to this type or produce an +// error if that is not possible. +// +// `required` (optional) - If set to `true`, `hcldec` will produce an error +// if a value is not provided for the source attribute. +// +// `Attr` is a leaf spec type, so no nested spec blocks are permitted. +type Attr struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Required bool `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attr) Reset() { *m = Attr{} } +func (m *Attr) String() string { return proto.CompactTextString(m) } +func (*Attr) ProtoMessage() {} +func (*Attr) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{1} +} +func (m *Attr) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Attr.Unmarshal(m, b) +} +func (m *Attr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Attr.Marshal(b, m, deterministic) +} +func (dst *Attr) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attr.Merge(dst, src) +} +func (m *Attr) XXX_Size() int { + return xxx_messageInfo_Attr.Size(m) +} +func (m *Attr) XXX_DiscardUnknown() { + xxx_messageInfo_Attr.DiscardUnknown(m) +} + +var xxx_messageInfo_Attr proto.InternalMessageInfo + +func (m *Attr) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Attr) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Attr) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +// Block spec type applies one nested spec block to the contents of a +// block within the current body and returns the result of that spec. It also +// creates validation constraints for the given block type name. +// +// ```hcl +// Block { +// name = "logging" +// +// Object { +// Attr "level" { +// type = string +// } +// Attr "file" { +// type = string +// } +// } +// } +// ``` +// +// `Block` spec blocks accept the following arguments: +// +// `name` (required) - The block type name to expect within the HCL +// input file. This may be omitted when a default name selector is created +// by a parent `Object` spec, if the input block type name should match the +// output JSON object property name. +// +// `required` (optional) - If set to `true`, `hcldec` will produce an error +// if a block of the specified type is not present in the current body. +// +// `Block` creates a validation constraint that there must be zero or one blocks +// of the given type name, or exactly one if `required` is set. +// +// `Block` expects a single nested spec block, which is applied to the body of +// the block of the given type when it is present. 
+// +type Block struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"` + Nested *Spec `protobuf:"bytes,3,opt,name=nested,proto3" json:"nested,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{2} +} +func (m *Block) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Block.Unmarshal(m, b) +} +func (m *Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Block.Marshal(b, m, deterministic) +} +func (dst *Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Block.Merge(dst, src) +} +func (m *Block) XXX_Size() int { + return xxx_messageInfo_Block.Size(m) +} +func (m *Block) XXX_DiscardUnknown() { + xxx_messageInfo_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Block proto.InternalMessageInfo + +func (m *Block) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Block) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *Block) GetNested() *Spec { + if m != nil { + return m.Nested + } + return nil +} + +// +// The BlockAttrs spec type is similar to an Attr spec block of a map type, +// but it produces a map from the attributes of a block rather than from an +// attribute's expression. +// +// ```hcl +// BlockAttrs { +// name = "variables" +// type = string +// required = false +// } +// ``` +// +// This allows a map with user-defined keys to be produced within block syntax, +// but due to the constraints of that syntax it also means that the user will +// be unable to dynamically-generate either individual key names using key +// expressions or the entire map value using a `for` expression. +// +// `BlockAttrs` spec blocks accept the following arguments: +// +// `name` (required) - The block type name to expect within the HCL +// input file. This may be omitted when a default name selector is created +// by a parent `object` spec, if the input block type name should match the +// output JSON object property name. +// +// `type` (required) - The value type to require for each of the +// attributes within a matched block. The resulting value will be a JSON +// object whose property values are of this type. +// +// `required` (optional) - If `true`, an error will be produced if a block +// of the given type is not present. If `false` -- the default -- an absent +// block will be indicated by producing `null`. 
+type BlockAttrs struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Required bool `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockAttrs) Reset() { *m = BlockAttrs{} } +func (m *BlockAttrs) String() string { return proto.CompactTextString(m) } +func (*BlockAttrs) ProtoMessage() {} +func (*BlockAttrs) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{3} +} +func (m *BlockAttrs) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockAttrs.Unmarshal(m, b) +} +func (m *BlockAttrs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockAttrs.Marshal(b, m, deterministic) +} +func (dst *BlockAttrs) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockAttrs.Merge(dst, src) +} +func (m *BlockAttrs) XXX_Size() int { + return xxx_messageInfo_BlockAttrs.Size(m) +} +func (m *BlockAttrs) XXX_DiscardUnknown() { + xxx_messageInfo_BlockAttrs.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockAttrs proto.InternalMessageInfo + +func (m *BlockAttrs) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BlockAttrs) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BlockAttrs) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +// BlockList spec type is similar to `Block`, but it accepts zero or +// more blocks of a specified type rather than requiring zero or one. The +// result is a JSON array with one entry per block of the given type. +// +// ```hcl +// BlockList { +// name = "log_file" +// +// Object { +// Attr "level" { +// type = string +// } +// Attr "filename" { +// type = string +// required = true +// } +// } +// } +// ``` +// +// `BlockList` spec blocks accept the following arguments: +// +// `name` (required) - The block type name to expect within the HCL +// input file. This may be omitted when a default name selector is created +// by a parent `Object` spec, if the input block type name should match the +// output JSON object property name. +// +// `min_items` (optional) - If set to a number greater than zero, `hcldec` will +// produce an error if fewer than the given number of blocks are present. +// +// `max_items` (optional) - If set to a number greater than zero, `hcldec` will +// produce an error if more than the given number of blocks are present. This +// attribute must be greater than or equal to `min_items` if both are set. +// +// `Block` creates a validation constraint on the number of blocks of the given +// type that must be present. +// +// `Block` expects a single nested spec block, which is applied to the body of +// each matching block to produce the resulting list items. 
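The same direct-construction style extends to the nested spec types. Below is a minimal sketch of the `log_file` BlockList example from the comment above, simplified so that it only uses generated types visible in this excerpt (the nested `Object` wrapper from the original example is replaced by a single `Attr`); both count limits are left at zero, so no item-count constraint is enforced.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/plugins/shared/hclspec"
)

func main() {
	// Zero or more `log_file` blocks, each carrying one required string
	// attribute named "filename".
	spec := &hclspec.Spec{
		Block: &hclspec.Spec_BlockList{
			BlockList: &hclspec.BlockList{
				Name:     "log_file",
				MinItems: 0,
				MaxItems: 0,
				Nested: &hclspec.Spec{
					Block: &hclspec.Spec_Attr{
						Attr: &hclspec.Attr{
							Name:     "filename",
							Type:     "string",
							Required: true,
						},
					},
				},
			},
		},
	}
	fmt.Println(spec.String())
}
```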
+// +type BlockList struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + MinItems uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems uint64 `protobuf:"varint,3,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + Nested *Spec `protobuf:"bytes,4,opt,name=nested,proto3" json:"nested,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockList) Reset() { *m = BlockList{} } +func (m *BlockList) String() string { return proto.CompactTextString(m) } +func (*BlockList) ProtoMessage() {} +func (*BlockList) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{4} +} +func (m *BlockList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockList.Unmarshal(m, b) +} +func (m *BlockList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockList.Marshal(b, m, deterministic) +} +func (dst *BlockList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockList.Merge(dst, src) +} +func (m *BlockList) XXX_Size() int { + return xxx_messageInfo_BlockList.Size(m) +} +func (m *BlockList) XXX_DiscardUnknown() { + xxx_messageInfo_BlockList.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockList proto.InternalMessageInfo + +func (m *BlockList) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BlockList) GetMinItems() uint64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *BlockList) GetMaxItems() uint64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *BlockList) GetNested() *Spec { + if m != nil { + return m.Nested + } + return nil +} + +// BlockSet spec type behaves the same as BlockList except that +// the result is in no specific order and any duplicate items are removed. +// +// ```hcl +// BlockSet { +// name = "log_file" +// +// Object { +// Attr "level" { +// type = string +// } +// Attr "filename" { +// type = string +// required = true +// } +// } +// } +// ``` +// +// The contents of `BlockSet` are the same as for `BlockList`. 
+// +type BlockSet struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + MinItems uint64 `protobuf:"varint,2,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems uint64 `protobuf:"varint,3,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + Nested *Spec `protobuf:"bytes,4,opt,name=nested,proto3" json:"nested,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockSet) Reset() { *m = BlockSet{} } +func (m *BlockSet) String() string { return proto.CompactTextString(m) } +func (*BlockSet) ProtoMessage() {} +func (*BlockSet) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{5} +} +func (m *BlockSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockSet.Unmarshal(m, b) +} +func (m *BlockSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockSet.Marshal(b, m, deterministic) +} +func (dst *BlockSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockSet.Merge(dst, src) +} +func (m *BlockSet) XXX_Size() int { + return xxx_messageInfo_BlockSet.Size(m) +} +func (m *BlockSet) XXX_DiscardUnknown() { + xxx_messageInfo_BlockSet.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockSet proto.InternalMessageInfo + +func (m *BlockSet) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BlockSet) GetMinItems() uint64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *BlockSet) GetMaxItems() uint64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *BlockSet) GetNested() *Spec { + if m != nil { + return m.Nested + } + return nil +} + +// BlockMap spec type is similar to `Block`, but it accepts zero or +// more blocks of a specified type rather than requiring zero or one. The +// result is a JSON object, or possibly multiple nested JSON objects, whose +// properties are derived from the labels set on each matching block. +// +// ```hcl +// BlockMap { +// name = "log_file" +// labels = ["filename"] +// +// Object { +// Attr "level" { +// type = string +// required = true +// } +// } +// } +// ``` +// +// `BlockMap` spec blocks accept the following arguments: +// +// `name` (required) - The block type name to expect within the HCL +// input file. This may be omitted when a default name selector is created +// by a parent `Object` spec, if the input block type name should match the +// output JSON object property name. +// +// `labels` (required) - A list of user-oriented block label names. Each entry +// in this list creates one level of object within the output value, and +// requires one additional block header label on any child block of this type. +// Block header labels are the quoted strings that appear after the block type +// name but before the opening `{`. +// +// `Block` creates a validation constraint on the number of labels that blocks +// of the given type must have. +// +// `Block` expects a single nested spec block, which is applied to the body of +// each matching block to produce the resulting map items. 
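The equivalent programmatic form, assuming the same illustrative block and label names as the HCL example above:

```go
package config

import "github.com/hashicorp/nomad/plugins/shared/hclspec"

// logFileMapSpec mirrors the HCL BlockMap example above: each "log_file"
// block carries one "filename" label, which becomes the key in the
// resulting JSON object, while the block body supplies the "level" value.
var logFileMapSpec = hclspec.NewBlockMap("log_file", []string{"filename"},
	hclspec.NewObject(map[string]*hclspec.Spec{
		"level": hclspec.NewAttr("level", "string", true),
	}))
```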
+// +type BlockMap struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + Nested *Spec `protobuf:"bytes,3,opt,name=nested,proto3" json:"nested,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockMap) Reset() { *m = BlockMap{} } +func (m *BlockMap) String() string { return proto.CompactTextString(m) } +func (*BlockMap) ProtoMessage() {} +func (*BlockMap) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{6} +} +func (m *BlockMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockMap.Unmarshal(m, b) +} +func (m *BlockMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockMap.Marshal(b, m, deterministic) +} +func (dst *BlockMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMap.Merge(dst, src) +} +func (m *BlockMap) XXX_Size() int { + return xxx_messageInfo_BlockMap.Size(m) +} +func (m *BlockMap) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMap.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMap proto.InternalMessageInfo + +func (m *BlockMap) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BlockMap) GetLabels() []string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *BlockMap) GetNested() *Spec { + if m != nil { + return m.Nested + } + return nil +} + +// Literal spec type returns a given literal value, and creates no +// validation constraints. It is most commonly used with the `Default` spec +// type to create a fallback value, but can also be used e.g. to fill out +// required properties in an `Object` spec that do not correspond to any +// construct in the input configuration. +// +// ```hcl +// Literal { +// value = "hello world" +// } +// ``` +// +// `Literal` spec blocks accept the following argument: +// +// `value` (required) - The value to return. This attribute may be an expression +// that uses [functions](#spec-definition-functions). +// +// `Literal` is a leaf spec type, so no nested spec blocks are permitted. +type Literal struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Literal) Reset() { *m = Literal{} } +func (m *Literal) String() string { return proto.CompactTextString(m) } +func (*Literal) ProtoMessage() {} +func (*Literal) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{7} +} +func (m *Literal) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Literal.Unmarshal(m, b) +} +func (m *Literal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Literal.Marshal(b, m, deterministic) +} +func (dst *Literal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Literal.Merge(dst, src) +} +func (m *Literal) XXX_Size() int { + return xxx_messageInfo_Literal.Size(m) +} +func (m *Literal) XXX_DiscardUnknown() { + xxx_messageInfo_Literal.DiscardUnknown(m) +} + +var xxx_messageInfo_Literal proto.InternalMessageInfo + +func (m *Literal) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Default spec type evaluates a sequence of nested specs in turn and +// returns the result of the first one that produces a non-null value. 
+// It creates no validation constraints of its own, but passes on the validation +// constraints from its first nested block. +// +// ```hcl +// Default { +// Attr { +// name = "private" +// type = bool +// } +// Literal { +// value = false +// } +// } +// ``` +// +// A `Default` spec block must have at least one nested spec block, and should +// generally have at least two since otherwise the `Default` wrapper is a no-op. +// +// The second and any subsequent spec blocks are _fallback_ specs. These exhibit +// their usual behavior but are not able to impose validation constraints on the +// current body since they are not evaluated unless all prior specs produce +// `null` as their result. +// +type Default struct { + Primary *Spec `protobuf:"bytes,1,opt,name=primary,proto3" json:"primary,omitempty"` + Default *Spec `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{8} +} +func (m *Default) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Default.Unmarshal(m, b) +} +func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Default.Marshal(b, m, deterministic) +} +func (dst *Default) XXX_Merge(src proto.Message) { + xxx_messageInfo_Default.Merge(dst, src) +} +func (m *Default) XXX_Size() int { + return xxx_messageInfo_Default.Size(m) +} +func (m *Default) XXX_DiscardUnknown() { + xxx_messageInfo_Default.DiscardUnknown(m) +} + +var xxx_messageInfo_Default proto.InternalMessageInfo + +func (m *Default) GetPrimary() *Spec { + if m != nil { + return m.Primary + } + return nil +} + +func (m *Default) GetDefault() *Spec { + if m != nil { + return m.Default + } + return nil +} + +// Object spec type is the most commonly used at the root of a spec file. +// Its result is a JSON object whose properties are set based on any nested +// spec blocks: +// +// ```hcl +// Object { +// Attr "name" { +// type = "string" +// } +// Block "address" { +// Object { +// Attr "street" { +// type = "string" +// } +// # ... +// } +// } +// } +// ``` +// +// Nested spec blocks inside `Object` must always have an extra block label +// `"name"`, `"address"` and `"street"` in the above example) that specifies +// the name of the property that should be created in the JSON object result. +// This label also acts as a default name selector for the nested spec, allowing +// the `Attr` blocks in the above example to omit the usually-required `name` +// argument in cases where the HCL input name and JSON output name are the same. +// +// An `Object` spec block creates no validation constraints, but it passes on +// any validation constraints created by the nested specs. 
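Since `Object` is typically the root of a driver's config spec, here is a small sketch composing it with `Attr`, `Default`, and `Literal` via the constructors in `spec.go`. The attribute names `image` and `enabled` are hypothetical, not taken from any real driver:

```go
package config

import "github.com/hashicorp/nomad/plugins/shared/hclspec"

// driverConfigSpec is a hypothetical root spec: a required string "image"
// attribute plus a bool "enabled" attribute that falls back to a literal
// default of true when omitted.
var driverConfigSpec = hclspec.NewObject(map[string]*hclspec.Spec{
	"image": hclspec.NewAttr("image", "string", true),
	"enabled": hclspec.NewDefault(
		hclspec.NewAttr("enabled", "bool", false),
		hclspec.NewLiteral("true"),
	),
})
```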
+type Object struct { + Attributes map[string]*Spec `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object) Reset() { *m = Object{} } +func (m *Object) String() string { return proto.CompactTextString(m) } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{9} +} +func (m *Object) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Object.Unmarshal(m, b) +} +func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Object.Marshal(b, m, deterministic) +} +func (dst *Object) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object.Merge(dst, src) +} +func (m *Object) XXX_Size() int { + return xxx_messageInfo_Object.Size(m) +} +func (m *Object) XXX_DiscardUnknown() { + xxx_messageInfo_Object.DiscardUnknown(m) +} + +var xxx_messageInfo_Object proto.InternalMessageInfo + +func (m *Object) GetAttributes() map[string]*Spec { + if m != nil { + return m.Attributes + } + return nil +} + +// Array spec type produces a JSON array whose elements are set based on +// any nested spec blocks: +// +// ```hcl +// Array { +// Attr { +// name = "first_element" +// type = "string" +// } +// Attr { +// name = "second_element" +// type = "string" +// } +// } +// ``` +// +// An `Array` spec block creates no validation constraints, but it passes on +// any validation constraints created by the nested specs. +type Array struct { + Values []*Spec `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Array) Reset() { *m = Array{} } +func (m *Array) String() string { return proto.CompactTextString(m) } +func (*Array) ProtoMessage() {} +func (*Array) Descriptor() ([]byte, []int) { + return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{10} +} +func (m *Array) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Array.Unmarshal(m, b) +} +func (m *Array) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Array.Marshal(b, m, deterministic) +} +func (dst *Array) XXX_Merge(src proto.Message) { + xxx_messageInfo_Array.Merge(dst, src) +} +func (m *Array) XXX_Size() int { + return xxx_messageInfo_Array.Size(m) +} +func (m *Array) XXX_DiscardUnknown() { + xxx_messageInfo_Array.DiscardUnknown(m) +} + +var xxx_messageInfo_Array proto.InternalMessageInfo + +func (m *Array) GetValues() []*Spec { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Spec)(nil), "hashicorp.nomad.plugins.shared.hclspec.Spec") + proto.RegisterType((*Attr)(nil), "hashicorp.nomad.plugins.shared.hclspec.Attr") + proto.RegisterType((*Block)(nil), "hashicorp.nomad.plugins.shared.hclspec.Block") + proto.RegisterType((*BlockAttrs)(nil), "hashicorp.nomad.plugins.shared.hclspec.BlockAttrs") + proto.RegisterType((*BlockList)(nil), "hashicorp.nomad.plugins.shared.hclspec.BlockList") + proto.RegisterType((*BlockSet)(nil), "hashicorp.nomad.plugins.shared.hclspec.BlockSet") + proto.RegisterType((*BlockMap)(nil), "hashicorp.nomad.plugins.shared.hclspec.BlockMap") + proto.RegisterType((*Literal)(nil), "hashicorp.nomad.plugins.shared.hclspec.Literal") + 
proto.RegisterType((*Default)(nil), "hashicorp.nomad.plugins.shared.hclspec.Default") + proto.RegisterType((*Object)(nil), "hashicorp.nomad.plugins.shared.hclspec.Object") + proto.RegisterMapType((map[string]*Spec)(nil), "hashicorp.nomad.plugins.shared.hclspec.Object.AttributesEntry") + proto.RegisterType((*Array)(nil), "hashicorp.nomad.plugins.shared.hclspec.Array") +} + +func init() { + proto.RegisterFile("plugins/shared/hclspec/hcl_spec.proto", fileDescriptor_hcl_spec_8d078e4df12ae415) +} + +var fileDescriptor_hcl_spec_8d078e4df12ae415 = []byte{ + // 624 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0x4d, 0x6f, 0xd3, 0x4c, + 0x10, 0xc7, 0xe3, 0xc4, 0xaf, 0xd3, 0xc3, 0xf3, 0x68, 0x85, 0x90, 0x55, 0x0e, 0x54, 0x96, 0x40, + 0x3d, 0x80, 0x0b, 0xe5, 0x82, 0x38, 0x20, 0x35, 0x6a, 0x91, 0x81, 0x46, 0xad, 0xb6, 0x82, 0x03, + 0x07, 0xa2, 0xb5, 0xb3, 0x10, 0x13, 0xbf, 0xb1, 0xbb, 0x41, 0x8d, 0x04, 0x1f, 0x84, 0x03, 0xf0, + 0xa9, 0xf8, 0x3e, 0x68, 0x5f, 0x9c, 0x14, 0x94, 0x43, 0x1c, 0x7a, 0xe0, 0x94, 0x19, 0x8f, 0xfe, + 0x3f, 0xcf, 0xec, 0xce, 0x78, 0x02, 0x77, 0x9a, 0x62, 0xfe, 0x3e, 0xaf, 0xf8, 0x01, 0x9f, 0x12, + 0x46, 0x27, 0x07, 0xd3, 0xac, 0xe0, 0x0d, 0xcd, 0xe4, 0xef, 0x58, 0x1a, 0x71, 0xc3, 0x6a, 0x51, + 0xa3, 0xbb, 0x53, 0xc2, 0xa7, 0x79, 0x56, 0xb3, 0x26, 0xae, 0xea, 0x92, 0x4c, 0x62, 0x23, 0x8b, + 0xb5, 0x2c, 0x36, 0xb2, 0xe8, 0x9b, 0x0b, 0xf6, 0x45, 0x43, 0x33, 0x94, 0x80, 0x5b, 0xa7, 0x1f, + 0x68, 0x26, 0x42, 0x6b, 0xcf, 0xda, 0xdf, 0x39, 0x8c, 0xe3, 0xcd, 0x08, 0xf1, 0x99, 0x52, 0x25, + 0x3d, 0x6c, 0xf4, 0xe8, 0x04, 0x1c, 0xc2, 0x18, 0x59, 0x84, 0x7d, 0x05, 0xba, 0xbf, 0x29, 0xe8, + 0x48, 0x8a, 0x92, 0x1e, 0xd6, 0x6a, 0x34, 0x04, 0xfb, 0x48, 0x08, 0x16, 0x0e, 0x14, 0xe5, 0xde, + 0xc6, 0x14, 0x21, 0x58, 0xd2, 0xc3, 0x4a, 0x8b, 0xce, 0x61, 0x27, 0x2d, 0xea, 0x6c, 0x36, 0xfe, + 0x44, 0x8a, 0x39, 0x0d, 0xed, 0x6e, 0x09, 0x0d, 0xa5, 0x34, 0xe9, 0x61, 0x50, 0x8c, 0xd7, 0x12, + 0x81, 0x5e, 0xb5, 0x44, 0x22, 0x04, 0xe3, 0xa1, 0xa3, 0x88, 0x87, 0x9d, 0x88, 0x32, 0x33, 0xbe, + 0xc4, 0x2a, 0x0f, 0x61, 0xd0, 0xde, 0xb8, 0xc8, 0xb9, 0x08, 0x5d, 0x45, 0x7d, 0xd8, 0x89, 0x7a, + 0x9a, 0x73, 0x79, 0x09, 0x41, 0xda, 0x3a, 0xe8, 0x0c, 0xb4, 0x33, 0xe6, 0x54, 0x84, 0x9e, 0x42, + 0x3e, 0xe8, 0x84, 0xbc, 0xa0, 0x92, 0xe8, 0xa7, 0xc6, 0x5e, 0x01, 0x4b, 0xd2, 0x84, 0xfe, 0x16, + 0xc0, 0x11, 0x69, 0x96, 0xc0, 0x11, 0x69, 0xd0, 0x4b, 0xf0, 0x26, 0xf4, 0x1d, 0x99, 0x17, 0x22, + 0x0c, 0x14, 0xee, 0x60, 0x53, 0xdc, 0xb1, 0x96, 0x25, 0x3d, 0xdc, 0x12, 0x24, 0xac, 0xc8, 0x05, + 0x65, 0xa4, 0x08, 0xa1, 0x1b, 0xec, 0x54, 0xcb, 0x24, 0xcc, 0x10, 0x86, 0x1e, 0x38, 0x2a, 0xcb, + 0xe8, 0x85, 0xee, 0x42, 0x84, 0xc0, 0xae, 0x48, 0x49, 0xd5, 0x70, 0x04, 0x58, 0xd9, 0xf2, 0x99, + 0x58, 0x34, 0x54, 0xf5, 0x79, 0x80, 0x95, 0x8d, 0x76, 0xc1, 0x67, 0xf4, 0xe3, 0x3c, 0x67, 0x74, + 0xa2, 0x3a, 0xd7, 0xc7, 0x4b, 0x3f, 0xfa, 0x02, 0x8e, 0x3a, 0x86, 0xb5, 0xb0, 0xab, 0xc2, 0xfe, + 0xef, 0x42, 0x74, 0x0c, 0x6e, 0x45, 0xb9, 0x30, 0xc8, 0x0e, 0xc3, 0x20, 0x27, 0x1b, 0x1b, 0x6d, + 0x74, 0x0e, 0xb0, 0xea, 0xbf, 0x6b, 0x29, 0xe8, 0x87, 0x05, 0xc1, 0xb2, 0xf9, 0xd6, 0x12, 0x6f, + 0x41, 0x50, 0xe6, 0xd5, 0x38, 0x17, 0xb4, 0xe4, 0x0a, 0x6b, 0x63, 0xbf, 0xcc, 0xab, 0xe7, 0xd2, + 0x57, 0x41, 0x72, 0x69, 0x82, 0x03, 0x13, 0x24, 0x97, 0x3a, 0xb8, 0xaa, 0xd9, 0xfe, 0x8b, 0x9a, + 0xbf, 0x5b, 0xe0, 0xb7, 0xbd, 0xfc, 0x4f, 0x26, 0xf8, 0xd9, 0xe4, 0x27, 0xc7, 0x61, 0x5d, 0x7e, + 0x37, 0xc1, 0x2d, 0x48, 0x4a, 0x0b, 0x99, 0xdc, 0x60, 0x3f, 0xc0, 0xc6, 0xbb, 0xa6, 0x96, 0xb8, + 0x0d, 
0x9e, 0x69, 0x7e, 0x74, 0x03, 0x1c, 0xfd, 0x91, 0xd4, 0x6f, 0xd7, 0x4e, 0xf4, 0xd5, 0x02, + 0xcf, 0xcc, 0x1a, 0x7a, 0x06, 0x5e, 0xc3, 0xf2, 0x92, 0xb0, 0x85, 0x59, 0x11, 0xdd, 0xde, 0xd9, + 0x8a, 0x25, 0xa7, 0x9d, 0xfa, 0xfe, 0x36, 0x1c, 0x23, 0x8e, 0x7e, 0x5a, 0xe0, 0xea, 0xe5, 0x83, + 0xde, 0x02, 0xc8, 0xef, 0x71, 0x9e, 0xce, 0x05, 0xe5, 0xa1, 0xb5, 0x37, 0xd8, 0xdf, 0x39, 0x7c, + 0xda, 0x6d, 0x81, 0xa9, 0xc5, 0xa1, 0x01, 0x27, 0x95, 0x60, 0x0b, 0x7c, 0x85, 0xb8, 0x3b, 0x83, + 0xff, 0xfe, 0x08, 0xa3, 0xff, 0x61, 0x30, 0xa3, 0x0b, 0x73, 0x5a, 0xd2, 0x44, 0xc3, 0xf6, 0x04, + 0xb7, 0xa9, 0x4a, 0x4b, 0x9f, 0xf4, 0x1f, 0x5b, 0xd1, 0x08, 0x1c, 0xb5, 0x0a, 0xe5, 0x1d, 0xab, + 0xa7, 0x6d, 0x45, 0x1d, 0xef, 0x58, 0x6b, 0x87, 0xc1, 0x1b, 0xcf, 0x3c, 0x4f, 0x5d, 0xf5, 0xdf, + 0xe0, 0xd1, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x32, 0x20, 0x9f, 0xf2, 0x44, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/hcl_spec.proto b/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/hcl_spec.proto new file mode 100644 index 0000000..e6ae1a1 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/hcl_spec.proto @@ -0,0 +1,421 @@ +syntax = "proto3"; + +option go_package = "hclspec"; + +/* Spec allows exposing the specification for an HCL body, allowing for parsing and +validation. + +Certain expressions within a specification may use the following functions. +The documentation for each spec type above specifies where functions may +be used. + +* `abs(number)` returns the absolute (positive) value of the given number. +* `coalesce(vals...)` returns the first non-null value given. +* `concat(lists...)` concatenates together all of the given lists to produce a new list. +* `hasindex(val, idx)` returns true if the expression `val[idx]` could succeed. +* `int(number)` returns the integer portion of the given number, rounding towards zero. +* `jsondecode(str)` interprets the given string as JSON and returns the resulting data structure. +* `jsonencode(val)` returns a JSON-serialized version of the given value. +* `length(collection)` returns the number of elements in the given collection (list, set, map, object, or tuple). +* `lower(string)` returns the given string with all uppercase letters converted to lowercase. +* `max(numbers...)` returns the greatest of the given numbers. +* `min(numbers...)` returns the smallest of the given numbers. +* `reverse(string)` returns the given string with all of the characters in reverse order. +* `strlen(string)` returns the number of characters in the given string. +* `substr(string, offset, length)` returns the requested substring of the given string. +* `upper(string)` returns the given string with all lowercase letters converted to uppercase. + +## Type Expressions + +Type expressions are used to describe the expected type of an attribute, as +an additional validation constraint. + +A type expression uses primitive type names and compound type constructors. +A type constructor builds a new type based on one or more type expression +arguments. + +The following type names and type constructors are supported: + +* `any` is a wildcard that accepts a value of any type. (In HCL terms, this + is the _dynamic pseudo-type_.) +* `string` is a Unicode string. +* `number` is an arbitrary-precision floating point number. 
+* `bool` is a boolean value (`true` or `false`) +* `list(element_type)` constructs a list type with the given element type +* `set(element_type)` constructs a set type with the given element type +* `map(element_type)` constructs a map type with the given element type +* `object({name1 = element_type, name2 = element_type, ...})` constructs + an object type with the given attribute types. +* `tuple([element_type, element_type, ...])` constructs a tuple type with + the given element types. This can be used, for example, to require an + array with a particular number of elements, or with elements of different + types. + +`null` is a valid value of any type, and not a type itself. +*/ +package hashicorp.nomad.plugins.shared.hclspec; + +// Spec defines the available specification types. +message Spec { + oneof block { + Object object = 1; + Array array = 2; + Attr Attr = 3; + Block block_value = 4; + BlockAttrs block_attrs = 5; + BlockList block_list = 6; + BlockSet block_set = 7; + BlockMap block_map = 8; + Default default = 9; + Literal literal = 10; + } +} + +/* Attr spec type reads the value of an attribute in the current body +and returns that value as its result. It also creates validation constraints +for the given attribute name and its value. + +```hcl +Attr { + name = "document_root" + type = string + required = true +} +``` + +`Attr` spec blocks accept the following arguments: + +* `name` (required) - The attribute name to expect within the HCL input file. + This may be omitted when a default name selector is created by a parent + `Object` spec, if the input attribute name should match the output JSON + object property name. + +* `type` (optional) - A [type expression](#type-expressions) that the given + attribute value must conform to. If this argument is set, `hcldec` will + automatically convert the given input value to this type or produce an + error if that is not possible. + +* `required` (optional) - If set to `true`, `hcldec` will produce an error + if a value is not provided for the source attribute. + +`Attr` is a leaf spec type, so no nested spec blocks are permitted. +*/ +message Attr { + string name = 1; + string type = 2; + bool required = 3; +} + +/* Block spec type applies one nested spec block to the contents of a +block within the current body and returns the result of that spec. It also +creates validation constraints for the given block type name. + +```hcl +Block { + name = "logging" + + Object { + Attr "level" { + type = string + } + Attr "file" { + type = string + } + } +} +``` + +`Block` spec blocks accept the following arguments: + +* `name` (required) - The block type name to expect within the HCL + input file. This may be omitted when a default name selector is created + by a parent `Object` spec, if the input block type name should match the + output JSON object property name. + +* `required` (optional) - If set to `true`, `hcldec` will produce an error + if a block of the specified type is not present in the current body. + +`Block` creates a validation constraint that there must be zero or one blocks +of the given type name, or exactly one if `required` is set. + +`Block` expects a single nested spec block, which is applied to the body of +the block of the given type when it is present. + +*/ +message Block { + string name = 1; + bool required = 2; + Spec nested = 3; +} + +/* + The BlockAttrs spec type is similar to an Attr spec block of a map type, + but it produces a map from the attributes of a block rather than from an + attribute's expression. 
+ + ```hcl + BlockAttrs { + name = "variables" + type = string + required = false + } + ``` + + This allows a map with user-defined keys to be produced within block syntax, + but due to the constraints of that syntax it also means that the user will + be unable to dynamically-generate either individual key names using key + expressions or the entire map value using a `for` expression. + + `BlockAttrs` spec blocks accept the following arguments: + + * `name` (required) - The block type name to expect within the HCL + input file. This may be omitted when a default name selector is created + by a parent `object` spec, if the input block type name should match the + output JSON object property name. + + * `type` (required) - The value type to require for each of the + attributes within a matched block. The resulting value will be a JSON + object whose property values are of this type. + + * `required` (optional) - If `true`, an error will be produced if a block + of the given type is not present. If `false` -- the default -- an absent + block will be indicated by producing `null`. +*/ +message BlockAttrs { + string name = 1; + string type = 2; + bool required = 3; +} + +/* BlockList spec type is similar to `Block`, but it accepts zero or +more blocks of a specified type rather than requiring zero or one. The +result is a JSON array with one entry per block of the given type. + +```hcl +BlockList { + name = "log_file" + + Object { + Attr "level" { + type = string + } + Attr "filename" { + type = string + required = true + } + } +} +``` + +`BlockList` spec blocks accept the following arguments: + +* `name` (required) - The block type name to expect within the HCL + input file. This may be omitted when a default name selector is created + by a parent `Object` spec, if the input block type name should match the + output JSON object property name. + +* `min_items` (optional) - If set to a number greater than zero, `hcldec` will + produce an error if fewer than the given number of blocks are present. + +* `max_items` (optional) - If set to a number greater than zero, `hcldec` will + produce an error if more than the given number of blocks are present. This + attribute must be greater than or equal to `min_items` if both are set. + +`Block` creates a validation constraint on the number of blocks of the given +type that must be present. + +`Block` expects a single nested spec block, which is applied to the body of +each matching block to produce the resulting list items. + +*/ +message BlockList { + string name = 1; + uint64 min_items = 2; + uint64 max_items = 3; + Spec nested = 4; +} + +/* BlockSet spec type behaves the same as BlockList except that +the result is in no specific order and any duplicate items are removed. + +```hcl +BlockSet { + name = "log_file" + + Object { + Attr "level" { + type = string + } + Attr "filename" { + type = string + required = true + } + } +} +``` + +The contents of `BlockSet` are the same as for `BlockList`. + +*/ +message BlockSet { + string name = 1; + uint64 min_items = 2; + uint64 max_items = 3; + Spec nested = 4; +} + +/* BlockMap spec type is similar to `Block`, but it accepts zero or +more blocks of a specified type rather than requiring zero or one. The +result is a JSON object, or possibly multiple nested JSON objects, whose +properties are derived from the labels set on each matching block. 
+ +```hcl +BlockMap { + name = "log_file" + labels = ["filename"] + + Object { + Attr "level" { + type = string + required = true + } + } +} +``` + +`BlockMap` spec blocks accept the following arguments: + +* `name` (required) - The block type name to expect within the HCL + input file. This may be omitted when a default name selector is created + by a parent `Object` spec, if the input block type name should match the + output JSON object property name. + +* `labels` (required) - A list of user-oriented block label names. Each entry + in this list creates one level of object within the output value, and + requires one additional block header label on any child block of this type. + Block header labels are the quoted strings that appear after the block type + name but before the opening `{`. + +`Block` creates a validation constraint on the number of labels that blocks +of the given type must have. + +`Block` expects a single nested spec block, which is applied to the body of +each matching block to produce the resulting map items. + +*/ +message BlockMap { + string name = 1; + repeated string labels = 2; + Spec nested = 3; +} + +/* Literal spec type returns a given literal value, and creates no +validation constraints. It is most commonly used with the `Default` spec +type to create a fallback value, but can also be used e.g. to fill out +required properties in an `Object` spec that do not correspond to any +construct in the input configuration. + +```hcl +Literal { + value = "hello world" +} +``` + +`Literal` spec blocks accept the following argument: + +* `value` (required) - The value to return. This attribute may be an expression + that uses [functions](#spec-definition-functions). + +`Literal` is a leaf spec type, so no nested spec blocks are permitted. +*/ +message Literal { + string value = 1; +} + +/* Default spec type evaluates a sequence of nested specs in turn and +returns the result of the first one that produces a non-null value. +It creates no validation constraints of its own, but passes on the validation +constraints from its first nested block. + +```hcl +Default { + Attr { + name = "private" + type = bool + } + Literal { + value = false + } +} +``` + +A `Default` spec block must have at least one nested spec block, and should +generally have at least two since otherwise the `Default` wrapper is a no-op. + +The second and any subsequent spec blocks are _fallback_ specs. These exhibit +their usual behavior but are not able to impose validation constraints on the +current body since they are not evaluated unless all prior specs produce +`null` as their result. + +*/ +message Default { + Spec primary = 1; + Spec default = 2; +} + +/* Object spec type is the most commonly used at the root of a spec file. +Its result is a JSON object whose properties are set based on any nested +spec blocks: + +```hcl +Object { + Attr "name" { + type = "string" + } + Block "address" { + Object { + Attr "street" { + type = "string" + } + # ... + } + } +} +``` + +Nested spec blocks inside `Object` must always have an extra block label +`"name"`, `"address"` and `"street"` in the above example) that specifies +the name of the property that should be created in the JSON object result. +This label also acts as a default name selector for the nested spec, allowing +the `Attr` blocks in the above example to omit the usually-required `name` +argument in cases where the HCL input name and JSON output name are the same. 
+ +An `Object` spec block creates no validation constraints, but it passes on +any validation constraints created by the nested specs. +*/ +message Object { + map attributes = 1; +} + +/* Array spec type produces a JSON array whose elements are set based on +any nested spec blocks: + +```hcl +Array { + Attr { + name = "first_element" + type = "string" + } + Attr { + name = "second_element" + type = "string" + } +} +``` + +An `Array` spec block creates no validation constraints, but it passes on +any validation constraints created by the nested specs. +*/ +message Array { + repeated Spec values = 1; +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/spec.go b/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/spec.go new file mode 100644 index 0000000..87ceab1 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/hclspec/spec.go @@ -0,0 +1,188 @@ +package hclspec + +// ObjectSpec wraps the object and returns a spec. +func ObjectSpec(obj *Object) *Spec { + return &Spec{ + Block: &Spec_Object{ + Object: obj, + }, + } +} + +// ArraySpec wraps the array and returns a spec. +func ArraySpec(array *Array) *Spec { + return &Spec{ + Block: &Spec_Array{ + Array: array, + }, + } +} + +// AttrSpec wraps the attr and returns a spec. +func AttrSpec(attr *Attr) *Spec { + return &Spec{ + Block: &Spec_Attr{ + Attr: attr, + }, + } +} + +// BlockSpec wraps the block and returns a spec. +func BlockSpec(block *Block) *Spec { + return &Spec{ + Block: &Spec_BlockValue{ + BlockValue: block, + }, + } +} + +// BlockAttrsSpec wraps the block attrs and returns a spec. +func BlockAttrsSpec(blockAttrs *BlockAttrs) *Spec { + return &Spec{ + Block: &Spec_BlockAttrs{ + BlockAttrs: blockAttrs, + }, + } +} + +// BlockListSpec wraps the block list and returns a spec. +func BlockListSpec(blockList *BlockList) *Spec { + return &Spec{ + Block: &Spec_BlockList{ + BlockList: blockList, + }, + } +} + +// BlockSetSpec wraps the block set and returns a spec. +func BlockSetSpec(blockSet *BlockSet) *Spec { + return &Spec{ + Block: &Spec_BlockSet{ + BlockSet: blockSet, + }, + } +} + +// BlockMapSpec wraps the block map and returns a spec. +func BlockMapSpec(blockMap *BlockMap) *Spec { + return &Spec{ + Block: &Spec_BlockMap{ + BlockMap: blockMap, + }, + } +} + +// DefaultSpec wraps the default and returns a spec. +func DefaultSpec(d *Default) *Spec { + return &Spec{ + Block: &Spec_Default{ + Default: d, + }, + } +} + +// LiteralSpec wraps the literal and returns a spec. +func LiteralSpec(l *Literal) *Spec { + return &Spec{ + Block: &Spec_Literal{ + Literal: l, + }, + } +} + +// NewObject returns a new object spec. +func NewObject(attrs map[string]*Spec) *Spec { + return ObjectSpec(&Object{ + Attributes: attrs, + }) +} + +// NewAttr returns a new attribute spec. +func NewAttr(name, attrType string, required bool) *Spec { + return AttrSpec(&Attr{ + Name: name, + Type: attrType, + Required: required, + }) +} + +// NewBlock returns a new block spec. +func NewBlock(name string, required bool, nested *Spec) *Spec { + return BlockSpec(&Block{ + Name: name, + Required: required, + Nested: nested, + }) +} + +// NewBlockAttrs returns a new block attrs spec +func NewBlockAttrs(name, elementType string, required bool) *Spec { + return BlockAttrsSpec(&BlockAttrs{ + Name: name, + Required: required, + Type: elementType, + }) +} + +// NewBlockList returns a new block list spec that has no limits. 
+func NewBlockList(name string, nested *Spec) *Spec { + return NewBlockListLimited(name, 0, 0, nested) +} + +// NewBlockListLimited returns a new block list spec that limits the number of +// blocks. +func NewBlockListLimited(name string, min, max uint64, nested *Spec) *Spec { + return BlockListSpec(&BlockList{ + Name: name, + MinItems: min, + MaxItems: max, + Nested: nested, + }) +} + +// NewBlockSet returns a new block set spec that has no limits. +func NewBlockSet(name string, nested *Spec) *Spec { + return NewBlockSetLimited(name, 0, 0, nested) +} + +// NewBlockSetLimited returns a new block set spec that limits the number of +// blocks. +func NewBlockSetLimited(name string, min, max uint64, nested *Spec) *Spec { + return BlockSetSpec(&BlockSet{ + Name: name, + MinItems: min, + MaxItems: max, + Nested: nested, + }) +} + +// NewBlockMap returns a new block map spec. +func NewBlockMap(name string, labels []string, nested *Spec) *Spec { + return BlockMapSpec(&BlockMap{ + Name: name, + Labels: labels, + Nested: nested, + }) +} + +// NewLiteral returns a new literal spec. +func NewLiteral(value string) *Spec { + return LiteralSpec(&Literal{ + Value: value, + }) +} + +// NewDefault returns a new default spec. +func NewDefault(primary, defaultValue *Spec) *Spec { + return DefaultSpec(&Default{ + Primary: primary, + Default: defaultValue, + }) +} + +// NewArray returns a new array spec. +func NewArray(values []*Spec) *Spec { + return ArraySpec(&Array{ + Values: values, + }) +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/attribute.go b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/attribute.go new file mode 100644 index 0000000..6836a99 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/attribute.go @@ -0,0 +1,462 @@ +package structs + +import ( + "fmt" + "math/big" + "strconv" + "strings" + "unicode" + + "github.com/hashicorp/nomad/helper" +) + +const ( + // floatPrecision is the precision used before rounding. It is set to a high + // number to give a high chance of correctly returning equality. + floatPrecision = uint(256) +) + +// BaseUnit is a unique base unit. All units that share the same base unit +// should be comparable. +type BaseUnit uint16 + +const ( + UnitScalar BaseUnit = iota + UnitByte + UnitByteRate + UnitHertz + UnitWatt +) + +// Unit describes a unit and its multiplier over the base unit type +type Unit struct { + // Name is the name of the unit (GiB, MB/s) + Name string + + // Base is the base unit for the unit + Base BaseUnit + + // Multiplier is the multiplier over the base unit (KiB multiplier is 1024) + Multiplier int64 + + // InverseMultiplier specifies that the multiplier is an inverse so: + // Base / Multiplier. For example a mW is a W/1000. + InverseMultiplier bool +} + +// Comparable returns if two units are comparable +func (u *Unit) Comparable(o *Unit) bool { + if u == nil || o == nil { + return false + } + + return u.Base == o.Base +} + +// ParseAttribute takes a string and parses it into an attribute, pulling out +// units if they are specified as a suffix on a number. 
+func ParseAttribute(input string) *Attribute { + ll := len(input) + if ll == 0 { + return &Attribute{String: helper.StringToPtr(input)} + } + + // Check if the string is a number ending with potential units + var unit string + numeric := input + if unicode.IsLetter(rune(input[ll-1])) { + // Try suffix matching + for _, u := range lengthSortedUnits { + if strings.HasSuffix(input, u) { + unit = u + break + } + } + + // Check if we know about the unit. + if len(unit) != 0 { + numeric = strings.TrimSpace(strings.TrimSuffix(input, unit)) + } + } + + // Try to parse as an int + i, err := strconv.ParseInt(numeric, 10, 64) + if err == nil { + return &Attribute{Int: helper.Int64ToPtr(i), Unit: unit} + } + + // Try to parse as a float + f, err := strconv.ParseFloat(numeric, 64) + if err == nil { + return &Attribute{Float: helper.Float64ToPtr(f), Unit: unit} + } + + // Try to parse as a bool + b, err := strconv.ParseBool(input) + if err == nil { + return &Attribute{Bool: helper.BoolToPtr(b)} + } + + return &Attribute{String: helper.StringToPtr(input)} +} + +// Attribute is used to describe the value of an attribute, optionally +// specifying units +type Attribute struct { + // Float is the float value for the attribute + Float *float64 + + // Int is the int value for the attribute + Int *int64 + + // String is the string value for the attribute + String *string + + // Bool is the bool value for the attribute + Bool *bool + + // Unit is the optional unit for the set int or float value + Unit string +} + +// NewStringAttribute returns a new string attribute. +func NewStringAttribute(s string) *Attribute { + return &Attribute{ + String: helper.StringToPtr(s), + } +} + +// NewBoolAttribute returns a new boolean attribute. +func NewBoolAttribute(b bool) *Attribute { + return &Attribute{ + Bool: helper.BoolToPtr(b), + } +} + +// NewIntergerAttribute returns a new integer attribute. The unit is not checked +// to be valid. +func NewIntAttribute(i int64, unit string) *Attribute { + return &Attribute{ + Int: helper.Int64ToPtr(i), + Unit: unit, + } +} + +// NewFloatAttribute returns a new float attribute. The unit is not checked to +// be valid. +func NewFloatAttribute(f float64, unit string) *Attribute { + return &Attribute{ + Float: helper.Float64ToPtr(f), + Unit: unit, + } +} + +// GetString returns the string value of the attribute or false if the attribute +// doesn't contain a string. +func (a *Attribute) GetString() (value string, ok bool) { + if a.String == nil { + return "", false + } + + return *a.String, true +} + +// GetBool returns the boolean value of the attribute or false if the attribute +// doesn't contain a boolean. +func (a *Attribute) GetBool() (value bool, ok bool) { + if a.Bool == nil { + return false, false + } + + return *a.Bool, true +} + +// GetInt returns the integer value of the attribute or false if the attribute +// doesn't contain a integer. +func (a *Attribute) GetInt() (value int64, ok bool) { + if a.Int == nil { + return 0, false + } + + return *a.Int, true +} + +// GetFloat returns the float value of the attribute or false if the attribute +// doesn't contain a float. 
+func (a *Attribute) GetFloat() (value float64, ok bool) { + if a.Float == nil { + return 0.0, false + } + + return *a.Float, true +} + +// Copy returns a copied version of the attribute +func (a *Attribute) Copy() *Attribute { + if a == nil { + return nil + } + + ca := &Attribute{ + Unit: a.Unit, + } + + if a.Float != nil { + ca.Float = helper.Float64ToPtr(*a.Float) + } + if a.Int != nil { + ca.Int = helper.Int64ToPtr(*a.Int) + } + if a.Bool != nil { + ca.Bool = helper.BoolToPtr(*a.Bool) + } + if a.String != nil { + ca.String = helper.StringToPtr(*a.String) + } + + return ca +} + +// GoString returns a string representation of the attribute +func (a *Attribute) GoString() string { + if a == nil { + return "nil attribute" + } + + var b strings.Builder + if a.Float != nil { + b.WriteString(fmt.Sprintf("%v", *a.Float)) + } else if a.Int != nil { + b.WriteString(fmt.Sprintf("%v", *a.Int)) + } else if a.Bool != nil { + b.WriteString(fmt.Sprintf("%v", *a.Bool)) + } else if a.String != nil { + b.WriteString(*a.String) + } + + if a.Unit != "" { + b.WriteString(a.Unit) + } + + return b.String() +} + +// Validate checks if the attribute is valid +func (a *Attribute) Validate() error { + if a.Unit != "" { + if _, ok := UnitIndex[a.Unit]; !ok { + return fmt.Errorf("unrecognized unit %q", a.Unit) + } + + // Check only int/float set + if a.String != nil || a.Bool != nil { + return fmt.Errorf("unit can not be specified on a boolean or string attribute") + } + } + + // Assert only one of the attributes is set + set := 0 + if a.Float != nil { + set++ + } + if a.Int != nil { + set++ + } + if a.String != nil { + set++ + } + if a.Bool != nil { + set++ + } + + if set == 0 { + return fmt.Errorf("no attribute value set") + } else if set > 1 { + return fmt.Errorf("only one attribute value may be set") + } + + return nil +} + +// Comparable returns whether the two attributes are comparable +func (a *Attribute) Comparable(b *Attribute) bool { + if a == nil || b == nil { + return false + } + + // First use the units to decide if comparison is possible + aUnit := a.getTypedUnit() + bUnit := b.getTypedUnit() + if aUnit != nil && bUnit != nil { + return aUnit.Comparable(bUnit) + } else if aUnit != nil && bUnit == nil { + return false + } else if aUnit == nil && bUnit != nil { + return false + } + + if a.String != nil { + if b.String != nil { + return true + } + return false + } + if a.Bool != nil { + if b.Bool != nil { + return true + } + return false + } + + return true +} + +// Compare compares two attributes. If the returned boolean value is false, it +// means the values are not comparable, either because they are of different +// types (bool versus int) or the units are incompatible for comparison. +// The returned int will be 0 if a==b, -1 if a < b, and +1 if a > b for all +// values but bool. For bool it will be 0 if a==b or 1 if a!=b. 
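A short sketch of the comparison semantics described above, using `ParseAttribute` from this file. Whether the `GiB`/`MiB` suffixes are stripped as units depends on the unit table (`UnitIndex`, `lengthSortedUnits`) defined elsewhere in this package, so treat those suffixes as assumptions:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/plugins/shared/structs"
)

func main() {
	// ParseAttribute splits a trailing unit suffix off a numeric value,
	// assuming the suffix is registered in the package's unit table.
	a := structs.ParseAttribute("1GiB")
	b := structs.ParseAttribute("512MiB")

	// Both attributes share the byte base unit, so they are comparable;
	// values are converted to the base unit first, giving a > b here.
	if cmp, ok := a.Compare(b); ok {
		fmt.Println(cmp) // 1
	}

	// A number with a unit and a boolean are not comparable, so ok is false.
	_, ok := a.Compare(structs.ParseAttribute("true"))
	fmt.Println(ok) // false
}
```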
+func (a *Attribute) Compare(b *Attribute) (int, bool) { + if !a.Comparable(b) { + return 0, false + } + + return a.comparator()(b) +} + +// comparator returns the comparator function for the attribute +func (a *Attribute) comparator() compareFn { + if a.Bool != nil { + return a.boolComparator + } + if a.String != nil { + return a.stringComparator + } + if a.Int != nil || a.Float != nil { + return a.numberComparator + } + + return nullComparator +} + +// boolComparator compares two boolean attributes +func (a *Attribute) boolComparator(b *Attribute) (int, bool) { + if *a.Bool == *b.Bool { + return 0, true + } + + return 1, true +} + +// stringComparator compares two string attributes +func (a *Attribute) stringComparator(b *Attribute) (int, bool) { + return strings.Compare(*a.String, *b.String), true +} + +// numberComparator compares two number attributes, having either Int or Float +// set. +func (a *Attribute) numberComparator(b *Attribute) (int, bool) { + // If they are both integers we do perfect precision comparisons + if a.Int != nil && b.Int != nil { + return a.intComparator(b) + } + + // Push both into the float space + af := a.getBigFloat() + bf := b.getBigFloat() + if af == nil || bf == nil { + return 0, false + } + + return af.Cmp(bf), true +} + +// intComparator compares two integer attributes. +func (a *Attribute) intComparator(b *Attribute) (int, bool) { + ai := a.getInt() + bi := b.getInt() + + if ai == bi { + return 0, true + } else if ai < bi { + return -1, true + } else { + return 1, true + } +} + +// nullComparator always returns false and is used when no comparison function +// is possible +func nullComparator(*Attribute) (int, bool) { + return 0, false +} + +// compareFn is used to compare two attributes. It returns -1, 0, 1 for ordering +// and a boolean for if the comparison is possible. +type compareFn func(b *Attribute) (int, bool) + +// getBigFloat returns a big.Float representation of the attribute, converting +// the value to the base unit if a unit is specified. +func (a *Attribute) getBigFloat() *big.Float { + f := new(big.Float) + f.SetPrec(floatPrecision) + if a.Int != nil { + f.SetInt64(*a.Int) + } else if a.Float != nil { + f.SetFloat64(*a.Float) + } else { + return nil + } + + // Get the unit + u := a.getTypedUnit() + + // If there is no unit just return the float + if u == nil { + return f + } + + // Convert to the base unit + multiplier := new(big.Float) + multiplier.SetPrec(floatPrecision) + multiplier.SetInt64(u.Multiplier) + if u.InverseMultiplier { + base := big.NewFloat(1.0) + base.SetPrec(floatPrecision) + multiplier = multiplier.Quo(base, multiplier) + } + + f.Mul(f, multiplier) + return f +} + +// getInt returns an int representation of the attribute, converting +// the value to the base unit if a unit is specified. +func (a *Attribute) getInt() int64 { + if a.Int == nil { + return 0 + } + + i := *a.Int + + // Get the unit + u := a.getTypedUnit() + + // If there is no unit just return the int + if u == nil { + return i + } + + if u.InverseMultiplier { + i /= u.Multiplier + } else { + i *= u.Multiplier + } + + return i +} + +// getTypedUnit returns the Unit for the attribute or nil if no unit exists. 
+func (a *Attribute) getTypedUnit() *Unit { + return UnitIndex[a.Unit] +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/plugin_reattach_config.go b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/plugin_reattach_config.go new file mode 100644 index 0000000..efac22e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/plugin_reattach_config.go @@ -0,0 +1,72 @@ +package structs + +import ( + "fmt" + "net" + + plugin "github.com/hashicorp/go-plugin" +) + +// ReattachConfig is a wrapper around plugin.ReattachConfig to better support +// serialization +type ReattachConfig struct { + Protocol string + Network string + Addr string + Pid int +} + +// ReattachConfigToGoPlugin converts a ReattachConfig wrapper struct into a go +// plugin ReattachConfig struct +func ReattachConfigToGoPlugin(rc *ReattachConfig) (*plugin.ReattachConfig, error) { + if rc == nil { + return nil, fmt.Errorf("nil ReattachConfig cannot be converted") + } + + plug := &plugin.ReattachConfig{ + Protocol: plugin.Protocol(rc.Protocol), + Pid: rc.Pid, + } + + switch rc.Network { + case "tcp", "tcp4", "tcp6": + addr, err := net.ResolveTCPAddr(rc.Network, rc.Addr) + if err != nil { + return nil, err + } + plug.Addr = addr + case "udp", "udp4", "udp6": + addr, err := net.ResolveUDPAddr(rc.Network, rc.Addr) + if err != nil { + return nil, err + } + plug.Addr = addr + case "unix", "unixgram", "unixpacket": + addr, err := net.ResolveUnixAddr(rc.Network, rc.Addr) + if err != nil { + return nil, err + } + plug.Addr = addr + default: + return nil, fmt.Errorf("unknown network: %s", rc.Network) + } + + return plug, nil +} + +// ReattachConfigFromGoPlugin converts a go plugin ReattachConfig into a +// ReattachConfig wrapper struct +func ReattachConfigFromGoPlugin(plug *plugin.ReattachConfig) *ReattachConfig { + if plug == nil { + return nil + } + + rc := &ReattachConfig{ + Protocol: string(plug.Protocol), + Network: plug.Addr.Network(), + Addr: plug.Addr.String(), + Pid: plug.Pid, + } + + return rc +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/attribute.pb.go b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/attribute.pb.go new file mode 100644 index 0000000..eaa0fca --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/attribute.pb.go @@ -0,0 +1,252 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/shared/structs/proto/attribute.proto + +package proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Attribute is used to describe the value of an attribute, optionally +// specifying units +type Attribute struct { + // Types that are valid to be assigned to Value: + // *Attribute_FloatVal + // *Attribute_IntVal + // *Attribute_StringVal + // *Attribute_BoolVal + Value isAttribute_Value `protobuf_oneof:"value"` + // unit gives the unit type: MHz, MB, etc. 
+ Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attribute) Reset() { *m = Attribute{} } +func (m *Attribute) String() string { return proto.CompactTextString(m) } +func (*Attribute) ProtoMessage() {} +func (*Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_attribute_aa187fb710a98f5a, []int{0} +} +func (m *Attribute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Attribute.Unmarshal(m, b) +} +func (m *Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Attribute.Marshal(b, m, deterministic) +} +func (dst *Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attribute.Merge(dst, src) +} +func (m *Attribute) XXX_Size() int { + return xxx_messageInfo_Attribute.Size(m) +} +func (m *Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_Attribute proto.InternalMessageInfo + +type isAttribute_Value interface { + isAttribute_Value() +} + +type Attribute_FloatVal struct { + FloatVal float64 `protobuf:"fixed64,1,opt,name=float_val,json=floatVal,proto3,oneof"` +} + +type Attribute_IntVal struct { + IntVal int64 `protobuf:"varint,2,opt,name=int_val,json=intVal,proto3,oneof"` +} + +type Attribute_StringVal struct { + StringVal string `protobuf:"bytes,3,opt,name=string_val,json=stringVal,proto3,oneof"` +} + +type Attribute_BoolVal struct { + BoolVal bool `protobuf:"varint,4,opt,name=bool_val,json=boolVal,proto3,oneof"` +} + +func (*Attribute_FloatVal) isAttribute_Value() {} + +func (*Attribute_IntVal) isAttribute_Value() {} + +func (*Attribute_StringVal) isAttribute_Value() {} + +func (*Attribute_BoolVal) isAttribute_Value() {} + +func (m *Attribute) GetValue() isAttribute_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Attribute) GetFloatVal() float64 { + if x, ok := m.GetValue().(*Attribute_FloatVal); ok { + return x.FloatVal + } + return 0 +} + +func (m *Attribute) GetIntVal() int64 { + if x, ok := m.GetValue().(*Attribute_IntVal); ok { + return x.IntVal + } + return 0 +} + +func (m *Attribute) GetStringVal() string { + if x, ok := m.GetValue().(*Attribute_StringVal); ok { + return x.StringVal + } + return "" +} + +func (m *Attribute) GetBoolVal() bool { + if x, ok := m.GetValue().(*Attribute_BoolVal); ok { + return x.BoolVal + } + return false +} + +func (m *Attribute) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Attribute) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Attribute_OneofMarshaler, _Attribute_OneofUnmarshaler, _Attribute_OneofSizer, []interface{}{ + (*Attribute_FloatVal)(nil), + (*Attribute_IntVal)(nil), + (*Attribute_StringVal)(nil), + (*Attribute_BoolVal)(nil), + } +} + +func _Attribute_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Attribute) + // value + switch x := m.Value.(type) { + case *Attribute_FloatVal: + b.EncodeVarint(1<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.FloatVal)) + case *Attribute_IntVal: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntVal)) + case *Attribute_StringVal: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringVal) + case *Attribute_BoolVal: + t := uint64(0) + if x.BoolVal { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("Attribute.Value has unexpected type %T", x) + } + return nil +} + +func _Attribute_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Attribute) + switch tag { + case 1: // value.float_val + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &Attribute_FloatVal{math.Float64frombits(x)} + return true, err + case 2: // value.int_val + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Attribute_IntVal{int64(x)} + return true, err + case 3: // value.string_val + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &Attribute_StringVal{x} + return true, err + case 4: // value.bool_val + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Attribute_BoolVal{x != 0} + return true, err + default: + return false, nil + } +} + +func _Attribute_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Attribute) + // value + switch x := m.Value.(type) { + case *Attribute_FloatVal: + n += 1 // tag and wire + n += 8 + case *Attribute_IntVal: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.IntVal)) + case *Attribute_StringVal: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.StringVal))) + n += len(x.StringVal) + case *Attribute_BoolVal: + n += 1 // tag and wire + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*Attribute)(nil), "hashicorp.nomad.plugins.shared.structs.Attribute") +} + +func init() { + proto.RegisterFile("plugins/shared/structs/proto/attribute.proto", fileDescriptor_attribute_aa187fb710a98f5a) +} + +var fileDescriptor_attribute_aa187fb710a98f5a = []byte{ + // 218 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8f, 0xb1, 0x4e, 0xc3, 0x30, + 0x10, 0x40, 0x63, 0xda, 0x34, 0xc9, 0x8d, 0x99, 0x8a, 0x10, 0x22, 0x62, 0x40, 0x19, 0x90, 0x33, + 0xf0, 0x05, 0x74, 0xf2, 0xec, 0x81, 0x81, 0x05, 0x5d, 0xda, 0xd0, 0x58, 0x32, 0x76, 0x64, 0x9f, + 0xfb, 0x3d, 0x7c, 0x2a, 0xf2, 0x25, 0x4c, 0xf6, 0xbd, 0x77, 0x6f, 0x38, 0x78, 0x5d, 0x6c, 0xba, + 0x1a, 0x17, 0x87, 0x38, 0x63, 0x98, 0x2e, 0x43, 0xa4, 0x90, 0xce, 0x14, 0x87, 0x25, 0x78, 0xf2, + 0x03, 
0x12, 0x05, 0x33, 0x26, 0x9a, 0x24, 0xcf, 0xed, 0xcb, 0x8c, 0x71, 0x36, 0x67, 0x1f, 0x16, + 0xe9, 0xfc, 0x0f, 0x5e, 0xe4, 0x56, 0xcb, 0xb5, 0x96, 0x5b, 0xfd, 0xfc, 0x2b, 0xa0, 0x79, 0xff, + 0x6f, 0xdb, 0x47, 0x68, 0xbe, 0xad, 0x47, 0xfa, 0xba, 0xa1, 0x3d, 0x8a, 0x4e, 0xf4, 0x42, 0x15, + 0xba, 0x66, 0xf4, 0x81, 0xb6, 0xbd, 0x87, 0xca, 0xb8, 0x55, 0xde, 0x75, 0xa2, 0xdf, 0xa9, 0x42, + 0x1f, 0x8c, 0x63, 0xf5, 0x04, 0x10, 0x29, 0x18, 0x77, 0x65, 0xbb, 0xeb, 0x44, 0xdf, 0xa8, 0x42, + 0x37, 0x2b, 0xcb, 0x0b, 0x0f, 0x50, 0x8f, 0xde, 0x5b, 0xd6, 0xfb, 0x4e, 0xf4, 0xb5, 0x2a, 0x74, + 0x95, 0x49, 0x96, 0x2d, 0xec, 0x93, 0x33, 0x74, 0x2c, 0x73, 0xa7, 0xf9, 0x7f, 0xaa, 0xa0, 0xbc, + 0xa1, 0x4d, 0xd3, 0xa9, 0xfa, 0x2c, 0xf9, 0xa6, 0xf1, 0xc0, 0xcf, 0xdb, 0x5f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x77, 0x2b, 0x7a, 0x7c, 0x0a, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/attribute.proto b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/attribute.proto new file mode 100644 index 0000000..4df7163 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/attribute.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; +package hashicorp.nomad.plugins.shared.structs; +option go_package = "proto"; + +// Attribute is used to describe the value of an attribute, optionally +// specifying units +message Attribute { + oneof value { + // float_val exposes a floating point value. + double float_val = 1; + + // int_numerator_val exposes a int value. + int64 int_val = 2; + + // string_val exposes a string value. + string string_val = 3; + + // bool_val exposes a boolean statistic. + bool bool_val = 4; + } + + // unit gives the unit type: MHz, MB, etc. + string unit = 5; +} + diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/recoverable_error.pb.go b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/recoverable_error.pb.go new file mode 100644 index 0000000..fdaf797 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/recoverable_error.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/shared/structs/proto/recoverable_error.proto + +package proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// RecoverableError is used with a grpc Status to indicate if the error is one +// which is recoverable and can be reattempted by the client. 
+type RecoverableError struct { + Recoverable bool `protobuf:"varint,1,opt,name=recoverable,proto3" json:"recoverable,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecoverableError) Reset() { *m = RecoverableError{} } +func (m *RecoverableError) String() string { return proto.CompactTextString(m) } +func (*RecoverableError) ProtoMessage() {} +func (*RecoverableError) Descriptor() ([]byte, []int) { + return fileDescriptor_recoverable_error_f746254fd69675b0, []int{0} +} +func (m *RecoverableError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecoverableError.Unmarshal(m, b) +} +func (m *RecoverableError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecoverableError.Marshal(b, m, deterministic) +} +func (dst *RecoverableError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecoverableError.Merge(dst, src) +} +func (m *RecoverableError) XXX_Size() int { + return xxx_messageInfo_RecoverableError.Size(m) +} +func (m *RecoverableError) XXX_DiscardUnknown() { + xxx_messageInfo_RecoverableError.DiscardUnknown(m) +} + +var xxx_messageInfo_RecoverableError proto.InternalMessageInfo + +func (m *RecoverableError) GetRecoverable() bool { + if m != nil { + return m.Recoverable + } + return false +} + +func init() { + proto.RegisterType((*RecoverableError)(nil), "hashicorp.nomad.plugins.shared.structs.RecoverableError") +} + +func init() { + proto.RegisterFile("plugins/shared/structs/proto/recoverable_error.proto", fileDescriptor_recoverable_error_f746254fd69675b0) +} + +var fileDescriptor_recoverable_error_f746254fd69675b0 = []byte{ + // 138 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0x2b, 0xd6, 0x2f, 0xce, 0x48, 0x2c, 0x4a, 0x4d, 0xd1, 0x2f, 0x2e, 0x29, 0x2a, 0x4d, + 0x2e, 0x29, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4b, 0x2d, + 0x4a, 0x4c, 0xca, 0x49, 0x8d, 0x4f, 0x2d, 0x2a, 0xca, 0x2f, 0xd2, 0x03, 0x8b, 0x0b, 0xa9, 0x65, + 0x24, 0x16, 0x67, 0x64, 0x26, 0xe7, 0x17, 0x15, 0xe8, 0xe5, 0xe5, 0xe7, 0x26, 0xa6, 0xe8, 0x41, + 0x4d, 0xd1, 0x83, 0x98, 0xa2, 0x07, 0x35, 0x45, 0xc9, 0x84, 0x4b, 0x20, 0x08, 0x61, 0x84, 0x2b, + 0xc8, 0x04, 0x21, 0x05, 0x2e, 0x6e, 0x24, 0x63, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x82, 0x90, + 0x85, 0x9c, 0xd8, 0xa3, 0x58, 0xc1, 0xd6, 0x24, 0xb1, 0x81, 0x29, 0x63, 0x40, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xc5, 0x45, 0x79, 0xed, 0xa5, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/recoverable_error.proto b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/recoverable_error.proto new file mode 100644 index 0000000..abd4f6b --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/recoverable_error.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package hashicorp.nomad.plugins.shared.structs; +option go_package = "proto"; + +// RecoverableError is used with a grpc Status to indicate if the error is one +// which is recoverable and can be reattempted by the client. 
+message RecoverableError { + bool recoverable = 1; +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/stats.pb.go b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/stats.pb.go new file mode 100644 index 0000000..38267eb --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/stats.pb.go @@ -0,0 +1,220 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: plugins/shared/structs/proto/stats.proto + +package proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// StatObject is a collection of statistics either exposed at the top +// level or via nested StatObjects. +type StatObject struct { + // nested is a mapping of object name to a nested stats object. + Nested map[string]*StatObject `protobuf:"bytes,1,rep,name=nested,proto3" json:"nested,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // attributes is a mapping of statistic name to its value. + Attributes map[string]*StatValue `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatObject) Reset() { *m = StatObject{} } +func (m *StatObject) String() string { return proto.CompactTextString(m) } +func (*StatObject) ProtoMessage() {} +func (*StatObject) Descriptor() ([]byte, []int) { + return fileDescriptor_stats_73a5e405c9cf442c, []int{0} +} +func (m *StatObject) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatObject.Unmarshal(m, b) +} +func (m *StatObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatObject.Marshal(b, m, deterministic) +} +func (dst *StatObject) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatObject.Merge(dst, src) +} +func (m *StatObject) XXX_Size() int { + return xxx_messageInfo_StatObject.Size(m) +} +func (m *StatObject) XXX_DiscardUnknown() { + xxx_messageInfo_StatObject.DiscardUnknown(m) +} + +var xxx_messageInfo_StatObject proto.InternalMessageInfo + +func (m *StatObject) GetNested() map[string]*StatObject { + if m != nil { + return m.Nested + } + return nil +} + +func (m *StatObject) GetAttributes() map[string]*StatValue { + if m != nil { + return m.Attributes + } + return nil +} + +// StatValue exposes the values of a particular statistic. The value may +// be of type double, integer, string or boolean. Numeric types can be +// exposed as a single value or as a fraction. +type StatValue struct { + // float_numerator_val exposes a floating point value. If denominator + // is set it is assumed to be a fractional value, otherwise it is a + // scalar. 
+ FloatNumeratorVal *wrappers.DoubleValue `protobuf:"bytes,1,opt,name=float_numerator_val,json=floatNumeratorVal,proto3" json:"float_numerator_val,omitempty"` + FloatDenominatorVal *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=float_denominator_val,json=floatDenominatorVal,proto3" json:"float_denominator_val,omitempty"` + // int_numerator_val exposes a int value. If denominator + // is set it is assumed to be a fractional value, otherwise it is a + // scalar. + IntNumeratorVal *wrappers.Int64Value `protobuf:"bytes,3,opt,name=int_numerator_val,json=intNumeratorVal,proto3" json:"int_numerator_val,omitempty"` + IntDenominatorVal *wrappers.Int64Value `protobuf:"bytes,4,opt,name=int_denominator_val,json=intDenominatorVal,proto3" json:"int_denominator_val,omitempty"` + // string_val exposes a string value. These are likely annotations. + StringVal *wrappers.StringValue `protobuf:"bytes,5,opt,name=string_val,json=stringVal,proto3" json:"string_val,omitempty"` + // bool_val exposes a boolean statistic. + BoolVal *wrappers.BoolValue `protobuf:"bytes,6,opt,name=bool_val,json=boolVal,proto3" json:"bool_val,omitempty"` + // unit gives the unit type: °F, %, MHz, MB, etc. + Unit string `protobuf:"bytes,7,opt,name=unit,proto3" json:"unit,omitempty"` + // desc provides a human readable description of the statistic. + Desc string `protobuf:"bytes,8,opt,name=desc,proto3" json:"desc,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatValue) Reset() { *m = StatValue{} } +func (m *StatValue) String() string { return proto.CompactTextString(m) } +func (*StatValue) ProtoMessage() {} +func (*StatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_stats_73a5e405c9cf442c, []int{1} +} +func (m *StatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatValue.Unmarshal(m, b) +} +func (m *StatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatValue.Marshal(b, m, deterministic) +} +func (dst *StatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatValue.Merge(dst, src) +} +func (m *StatValue) XXX_Size() int { + return xxx_messageInfo_StatValue.Size(m) +} +func (m *StatValue) XXX_DiscardUnknown() { + xxx_messageInfo_StatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StatValue proto.InternalMessageInfo + +func (m *StatValue) GetFloatNumeratorVal() *wrappers.DoubleValue { + if m != nil { + return m.FloatNumeratorVal + } + return nil +} + +func (m *StatValue) GetFloatDenominatorVal() *wrappers.DoubleValue { + if m != nil { + return m.FloatDenominatorVal + } + return nil +} + +func (m *StatValue) GetIntNumeratorVal() *wrappers.Int64Value { + if m != nil { + return m.IntNumeratorVal + } + return nil +} + +func (m *StatValue) GetIntDenominatorVal() *wrappers.Int64Value { + if m != nil { + return m.IntDenominatorVal + } + return nil +} + +func (m *StatValue) GetStringVal() *wrappers.StringValue { + if m != nil { + return m.StringVal + } + return nil +} + +func (m *StatValue) GetBoolVal() *wrappers.BoolValue { + if m != nil { + return m.BoolVal + } + return nil +} + +func (m *StatValue) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *StatValue) GetDesc() string { + if m != nil { + return m.Desc + } + return "" +} + +func init() { + proto.RegisterType((*StatObject)(nil), "hashicorp.nomad.plugins.shared.structs.StatObject") + proto.RegisterMapType((map[string]*StatValue)(nil), 
"hashicorp.nomad.plugins.shared.structs.StatObject.AttributesEntry") + proto.RegisterMapType((map[string]*StatObject)(nil), "hashicorp.nomad.plugins.shared.structs.StatObject.NestedEntry") + proto.RegisterType((*StatValue)(nil), "hashicorp.nomad.plugins.shared.structs.StatValue") +} + +func init() { + proto.RegisterFile("plugins/shared/structs/proto/stats.proto", fileDescriptor_stats_73a5e405c9cf442c) +} + +var fileDescriptor_stats_73a5e405c9cf442c = []byte{ + // 444 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0xd2, 0xdf, 0x6a, 0x13, 0x41, + 0x14, 0x06, 0x70, 0x36, 0xdb, 0x24, 0xcd, 0xc9, 0x45, 0xed, 0x14, 0x61, 0x89, 0x22, 0xa1, 0x17, + 0x92, 0xab, 0x59, 0x8c, 0x7f, 0x10, 0x05, 0xc1, 0x50, 0xa9, 0xa2, 0x54, 0x49, 0x21, 0x17, 0xde, + 0x94, 0xd9, 0xec, 0x74, 0x33, 0x3a, 0x3b, 0xb3, 0xcc, 0x9c, 0xa9, 0xf4, 0x91, 0x7c, 0x28, 0xdf, + 0x45, 0x76, 0x66, 0x93, 0xa6, 0x5b, 0xc5, 0xd6, 0xab, 0x9c, 0x4c, 0xf2, 0xfd, 0xbe, 0xb3, 0xcc, + 0xc2, 0xa4, 0x92, 0xae, 0x10, 0xca, 0xa6, 0x76, 0xc5, 0x0c, 0xcf, 0x53, 0x8b, 0xc6, 0x2d, 0xd1, + 0xa6, 0x95, 0xd1, 0xa8, 0x53, 0x8b, 0x0c, 0x2d, 0xf5, 0x33, 0x79, 0xbc, 0x62, 0x76, 0x25, 0x96, + 0xda, 0x54, 0x54, 0xe9, 0x92, 0xe5, 0xb4, 0x49, 0xd2, 0x90, 0xa4, 0x4d, 0x72, 0xf4, 0xa8, 0xd0, + 0xba, 0x90, 0x3c, 0x08, 0x99, 0x3b, 0x4f, 0x7f, 0x18, 0x56, 0x55, 0xdc, 0x34, 0xce, 0xe1, 0xcf, + 0x18, 0xe0, 0x14, 0x19, 0x7e, 0xce, 0xbe, 0xf1, 0x25, 0x92, 0x05, 0xf4, 0x14, 0xb7, 0xc8, 0xf3, + 0x24, 0x1a, 0xc7, 0x93, 0xe1, 0xf4, 0x0d, 0xbd, 0x5d, 0x0f, 0xbd, 0x32, 0xe8, 0x89, 0x07, 0xde, + 0x29, 0x34, 0x97, 0xf3, 0x46, 0x23, 0x19, 0x00, 0x43, 0x34, 0x22, 0x73, 0xc8, 0x6d, 0xd2, 0xf1, + 0xf6, 0xec, 0x3f, 0xec, 0xb7, 0x1b, 0x24, 0xf8, 0x5b, 0xea, 0xa8, 0x84, 0xe1, 0x56, 0x35, 0xb9, + 0x07, 0xf1, 0x77, 0x7e, 0x99, 0x44, 0xe3, 0x68, 0x32, 0x98, 0xd7, 0x23, 0x79, 0x0f, 0xdd, 0x0b, + 0x26, 0x1d, 0x4f, 0x3a, 0xe3, 0x68, 0x32, 0x9c, 0x4e, 0xef, 0xde, 0x3f, 0x0f, 0xc0, 0xab, 0xce, + 0xcb, 0x68, 0x54, 0xc1, 0x5e, 0x6b, 0x9b, 0x3f, 0x54, 0x1e, 0x5f, 0xaf, 0x7c, 0x72, 0x97, 0xca, + 0x45, 0x1d, 0xdc, 0x6a, 0x3c, 0xfc, 0x15, 0xc3, 0x60, 0xf3, 0x03, 0xf9, 0x04, 0x07, 0xe7, 0x52, + 0x33, 0x3c, 0x53, 0xae, 0xe4, 0x86, 0xa1, 0x36, 0x67, 0x17, 0x4c, 0xfa, 0xf2, 0xe1, 0xf4, 0x21, + 0x0d, 0xf7, 0x4e, 0xd7, 0xf7, 0x4e, 0x8f, 0xb4, 0xcb, 0x24, 0x0f, 0xe6, 0xbe, 0x0f, 0x9e, 0xac, + 0x73, 0x0b, 0x26, 0xc9, 0x17, 0xb8, 0x1f, 0xb4, 0x9c, 0x2b, 0x5d, 0x0a, 0xb5, 0xf1, 0x3a, 0xb7, + 0xf0, 0xc2, 0x22, 0x47, 0x57, 0xc9, 0x5a, 0x3c, 0x86, 0x7d, 0xa1, 0xda, 0xdb, 0xc5, 0x5e, 0x7b, + 0x70, 0x43, 0xfb, 0xa0, 0xf0, 0xc5, 0xb3, 0x80, 0xed, 0x09, 0x75, 0x7d, 0xb5, 0x8f, 0x70, 0x50, + 0x43, 0xed, 0xc5, 0x76, 0xfe, 0x4d, 0xd5, 0x0b, 0xb4, 0xb6, 0x7a, 0x0d, 0x60, 0xd1, 0x08, 0x55, + 0x78, 0xa3, 0xfb, 0x97, 0x87, 0x3b, 0xf5, 0x7f, 0x09, 0xc8, 0xc0, 0xae, 0xbf, 0x90, 0xe7, 0xb0, + 0x9b, 0x69, 0x2d, 0x7d, 0xb4, 0xe7, 0xa3, 0xa3, 0x1b, 0xd1, 0x99, 0xd6, 0x32, 0x04, 0xfb, 0x59, + 0x18, 0x09, 0x81, 0x1d, 0xa7, 0x04, 0x26, 0x7d, 0xff, 0x5e, 0xf8, 0xb9, 0x3e, 0xcb, 0xb9, 0x5d, + 0x26, 0xbb, 0xe1, 0xac, 0x9e, 0x67, 0xfd, 0xaf, 0xdd, 0xc0, 0xf4, 0xfc, 0xc7, 0xd3, 0xdf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x7d, 0x1d, 0x2d, 0xe3, 0x0f, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/stats.proto b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/stats.proto new file mode 100644 index 0000000..76c808b --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/proto/stats.proto @@ -0,0 +1,44 @@ 
+syntax = "proto3"; +package hashicorp.nomad.plugins.shared.structs; +option go_package = "proto"; + +import "google/protobuf/wrappers.proto"; + +// StatObject is a collection of statistics either exposed at the top +// level or via nested StatObjects. +message StatObject { + // nested is a mapping of object name to a nested stats object. + map<string, StatObject> nested = 1; + + // attributes is a mapping of statistic name to its value. + map<string, StatValue> attributes = 2; +} + +// StatValue exposes the values of a particular statistic. The value may +// be of type double, integer, string or boolean. Numeric types can be +// exposed as a single value or as a fraction. +message StatValue { + // float_numerator_val exposes a floating point value. If denominator + // is set it is assumed to be a fractional value, otherwise it is a + // scalar. + google.protobuf.DoubleValue float_numerator_val = 1; + google.protobuf.DoubleValue float_denominator_val = 2; + + // int_numerator_val exposes a int value. If denominator + // is set it is assumed to be a fractional value, otherwise it is a + // scalar. + google.protobuf.Int64Value int_numerator_val = 3; + google.protobuf.Int64Value int_denominator_val = 4; + + // string_val exposes a string value. These are likely annotations. + google.protobuf.StringValue string_val = 5; + + // bool_val exposes a boolean statistic. + google.protobuf.BoolValue bool_val = 6; + + // unit gives the unit type: °F, %, MHz, MB, etc. + string unit = 7; + + // desc provides a human readable description of the statistic. + string desc = 8; +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/stats.go b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/stats.go new file mode 100644 index 0000000..5ebc778 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/stats.go @@ -0,0 +1,38 @@ +package structs + +// StatObject is a collection of statistics either exposed at the top +// level or via nested StatObjects. +type StatObject struct { + // Nested is a mapping of object name to a nested stats object. + Nested map[string]*StatObject + + // Attributes is a mapping of statistic name to its value. + Attributes map[string]*StatValue +} + +// StatValue exposes the values of a particular statistic. The value may be of +// type float, integer, string or boolean. Numeric types can be exposed as a +// single value or as a fraction. +type StatValue struct { + // FloatNumeratorVal exposes a floating point value. If denominator is set + // it is assumed to be a fractional value, otherwise it is a scalar. + FloatNumeratorVal *float64 `json:",omitempty"` + FloatDenominatorVal *float64 `json:",omitempty"` + + // IntNumeratorVal exposes a int value. If denominator is set it is assumed + // to be a fractional value, otherwise it is a scalar. + IntNumeratorVal *int64 `json:",omitempty"` + IntDenominatorVal *int64 `json:",omitempty"` + + // StringVal exposes a string value. These are likely annotations. + StringVal *string `json:",omitempty"` + + // BoolVal exposes a boolean statistic. + BoolVal *bool `json:",omitempty"` + + // Unit gives the unit type: °F, %, MHz, MB, etc. + Unit string `json:",omitempty"` + + // Desc provides a human readable description of the statistic.
+ Desc string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/units.go b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/units.go new file mode 100644 index 0000000..4310b84 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/units.go @@ -0,0 +1,260 @@ +package structs + +import "sort" + +const ( + // Binary SI Byte Units + UnitKiB = "KiB" + UnitMiB = "MiB" + UnitGiB = "GiB" + UnitTiB = "TiB" + UnitPiB = "PiB" + UnitEiB = "EiB" + + // Decimal SI Byte Units + UnitkB = "kB" + UnitKB = "KB" + UnitMB = "MB" + UnitGB = "GB" + UnitTB = "TB" + UnitPB = "PB" + UnitEB = "EB" + + // Binary SI Byte Rates + UnitKiBPerS = "KiB/s" + UnitMiBPerS = "MiB/s" + UnitGiBPerS = "GiB/s" + UnitTiBPerS = "TiB/s" + UnitPiBPerS = "PiB/s" + UnitEiBPerS = "EiB/s" + + // Decimal SI Byte Rates + UnitkBPerS = "kB/s" + UnitKBPerS = "KB/s" + UnitMBPerS = "MB/s" + UnitGBPerS = "GB/s" + UnitTBPerS = "TB/s" + UnitPBPerS = "PB/s" + UnitEBPerS = "EB/s" + + // Hertz units + UnitMHz = "MHz" + UnitGHz = "GHz" + + // Watts units + UnitmW = "mW" + UnitW = "W" + UnitkW = "kW" + UnitMW = "MW" + UnitGW = "GW" +) + +var ( + // numUnits is the number of known units + numUnits = len(binarySIBytes) + len(decimalSIBytes) + len(binarySIByteRates) + len(decimalSIByteRates) + len(watts) + len(hertz) + + // UnitIndex is a map of unit name to unit + UnitIndex = make(map[string]*Unit, numUnits) + + // lengthSortedUnits is a list of unit names sorted by length with longest + // first + lengthSortedUnits = make([]string, 0, numUnits) + + binarySIBytes = []*Unit{ + { + Name: UnitKiB, + Base: UnitByte, + Multiplier: 1 << 10, + }, + { + Name: UnitMiB, + Base: UnitByte, + Multiplier: 1 << 20, + }, + { + Name: UnitGiB, + Base: UnitByte, + Multiplier: 1 << 30, + }, + { + Name: UnitTiB, + Base: UnitByte, + Multiplier: 1 << 40, + }, + { + Name: UnitPiB, + Base: UnitByte, + Multiplier: 1 << 50, + }, + { + Name: UnitEiB, + Base: UnitByte, + Multiplier: 1 << 60, + }, + } + + decimalSIBytes = []*Unit{ + { + Name: UnitkB, + Base: UnitByte, + Multiplier: Pow(1000, 1), + }, + { + Name: UnitKB, // Alternative name for kB + Base: UnitByte, + Multiplier: Pow(1000, 1), + }, + { + Name: UnitMB, + Base: UnitByte, + Multiplier: Pow(1000, 2), + }, + { + Name: UnitGB, + Base: UnitByte, + Multiplier: Pow(1000, 3), + }, + { + Name: UnitTB, + Base: UnitByte, + Multiplier: Pow(1000, 4), + }, + { + Name: UnitPB, + Base: UnitByte, + Multiplier: Pow(1000, 5), + }, + { + Name: UnitEB, + Base: UnitByte, + Multiplier: Pow(1000, 6), + }, + } + + binarySIByteRates = []*Unit{ + { + Name: UnitKiBPerS, + Base: UnitByteRate, + Multiplier: 1 << 10, + }, + { + Name: UnitMiBPerS, + Base: UnitByteRate, + Multiplier: 1 << 20, + }, + { + Name: UnitGiBPerS, + Base: UnitByteRate, + Multiplier: 1 << 30, + }, + { + Name: UnitTiBPerS, + Base: UnitByteRate, + Multiplier: 1 << 40, + }, + { + Name: UnitPiBPerS, + Base: UnitByteRate, + Multiplier: 1 << 50, + }, + { + Name: UnitEiBPerS, + Base: UnitByteRate, + Multiplier: 1 << 60, + }, + } + + decimalSIByteRates = []*Unit{ + { + Name: UnitkBPerS, + Base: UnitByteRate, + Multiplier: Pow(1000, 1), + }, + { + Name: UnitKBPerS, + Base: UnitByteRate, + Multiplier: Pow(1000, 1), + }, + { + Name: UnitMBPerS, + Base: UnitByteRate, + Multiplier: Pow(1000, 2), + }, + { + Name: UnitGBPerS, + Base: UnitByteRate, + Multiplier: Pow(1000, 3), + }, + { + Name: UnitTBPerS, + Base: UnitByteRate, + Multiplier: Pow(1000, 4), + }, + { + Name: UnitPBPerS, + Base: UnitByteRate, + Multiplier: 
Pow(1000, 5), + }, + { + Name: UnitEBPerS, + Base: UnitByteRate, + Multiplier: Pow(1000, 6), + }, + } + + hertz = []*Unit{ + { + Name: UnitMHz, + Base: UnitHertz, + Multiplier: Pow(1000, 2), + }, + { + Name: UnitGHz, + Base: UnitHertz, + Multiplier: Pow(1000, 3), + }, + } + + watts = []*Unit{ + { + Name: UnitmW, + Base: UnitWatt, + Multiplier: Pow(10, 3), + InverseMultiplier: true, + }, + { + Name: UnitW, + Base: UnitWatt, + Multiplier: 1, + }, + { + Name: UnitkW, + Base: UnitWatt, + Multiplier: Pow(10, 3), + }, + { + Name: UnitMW, + Base: UnitWatt, + Multiplier: Pow(10, 6), + }, + { + Name: UnitGW, + Base: UnitWatt, + Multiplier: Pow(10, 9), + }, + } +) + +func init() { + // Build the index + for _, units := range [][]*Unit{binarySIBytes, decimalSIBytes, binarySIByteRates, decimalSIByteRates, watts, hertz} { + for _, unit := range units { + UnitIndex[unit.Name] = unit + lengthSortedUnits = append(lengthSortedUnits, unit.Name) + } + } + + sort.Slice(lengthSortedUnits, func(i, j int) bool { + return len(lengthSortedUnits[i]) >= len(lengthSortedUnits[j]) + }) +} diff --git a/vendor/github.com/hashicorp/nomad/plugins/shared/structs/util.go b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/util.go new file mode 100644 index 0000000..2a4b9b0 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/plugins/shared/structs/util.go @@ -0,0 +1,254 @@ +package structs + +import ( + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/plugins/shared/structs/proto" +) + +func ConvertProtoAttribute(in *proto.Attribute) *Attribute { + out := &Attribute{ + Unit: in.Unit, + } + + switch in.Value.(type) { + case *proto.Attribute_BoolVal: + out.Bool = helper.BoolToPtr(in.GetBoolVal()) + case *proto.Attribute_FloatVal: + out.Float = helper.Float64ToPtr(in.GetFloatVal()) + case *proto.Attribute_IntVal: + out.Int = helper.Int64ToPtr(in.GetIntVal()) + case *proto.Attribute_StringVal: + out.String = helper.StringToPtr(in.GetStringVal()) + default: + } + + return out +} + +func ConvertProtoAttributeMap(in map[string]*proto.Attribute) map[string]*Attribute { + if in == nil { + return nil + } + + out := make(map[string]*Attribute, len(in)) + for k, a := range in { + out[k] = ConvertProtoAttribute(a) + } + + return out +} + +func ConvertStructsAttribute(in *Attribute) *proto.Attribute { + out := &proto.Attribute{ + Unit: in.Unit, + } + + if in.Int != nil { + out.Value = &proto.Attribute_IntVal{ + IntVal: *in.Int, + } + } else if in.Float != nil { + out.Value = &proto.Attribute_FloatVal{ + FloatVal: *in.Float, + } + } else if in.String != nil { + out.Value = &proto.Attribute_StringVal{ + StringVal: *in.String, + } + } else if in.Bool != nil { + out.Value = &proto.Attribute_BoolVal{ + BoolVal: *in.Bool, + } + } + + return out +} + +func ConvertStructAttributeMap(in map[string]*Attribute) map[string]*proto.Attribute { + if in == nil { + return nil + } + + out := make(map[string]*proto.Attribute, len(in)) + for k, a := range in { + out[k] = ConvertStructsAttribute(a) + } + + return out +} + +func Pow(a, b int64) int64 { + var p int64 = 1 + for b > 0 { + if b&1 != 0 { + p *= a + } + b >>= 1 + a *= a + } + return p +} + +// CopyMapStringAttribute copies a map of string to Attribute +func CopyMapStringAttribute(in map[string]*Attribute) map[string]*Attribute { + if in == nil { + return nil + } + + out := make(map[string]*Attribute, len(in)) + for k, v := range in { + out[k] = v.Copy() + } + return out +} + +// ConvertProtoStatObject converts between a proto 
and struct StatObject +func ConvertProtoStatObject(in *proto.StatObject) *StatObject { + if in == nil { + return nil + } + + out := &StatObject{ + Nested: make(map[string]*StatObject, len(in.Nested)), + Attributes: make(map[string]*StatValue, len(in.Attributes)), + } + + for k, v := range in.Nested { + out.Nested[k] = ConvertProtoStatObject(v) + } + + for k, v := range in.Attributes { + out.Attributes[k] = ConvertProtoStatValue(v) + } + + return out +} + +// ConvertProtoStatValue converts between a proto and struct StatValue +func ConvertProtoStatValue(in *proto.StatValue) *StatValue { + if in == nil { + return nil + } + + return &StatValue{ + FloatNumeratorVal: unwrapDouble(in.FloatNumeratorVal), + FloatDenominatorVal: unwrapDouble(in.FloatDenominatorVal), + IntNumeratorVal: unwrapInt64(in.IntNumeratorVal), + IntDenominatorVal: unwrapInt64(in.IntDenominatorVal), + StringVal: unwrapString(in.StringVal), + BoolVal: unwrapBool(in.BoolVal), + Unit: in.Unit, + Desc: in.Desc, + } +} + +// ConvertStructStatObject converts between a struct and proto StatObject +func ConvertStructStatObject(in *StatObject) *proto.StatObject { + if in == nil { + return nil + } + + out := &proto.StatObject{ + Nested: make(map[string]*proto.StatObject, len(in.Nested)), + Attributes: make(map[string]*proto.StatValue, len(in.Attributes)), + } + + for k, v := range in.Nested { + out.Nested[k] = ConvertStructStatObject(v) + } + + for k, v := range in.Attributes { + out.Attributes[k] = ConvertStructStatValue(v) + } + + return out +} + +// ConvertStructStatValue converts between a struct and proto StatValue +func ConvertStructStatValue(in *StatValue) *proto.StatValue { + if in == nil { + return nil + } + + return &proto.StatValue{ + FloatNumeratorVal: wrapDouble(in.FloatNumeratorVal), + FloatDenominatorVal: wrapDouble(in.FloatDenominatorVal), + IntNumeratorVal: wrapInt64(in.IntNumeratorVal), + IntDenominatorVal: wrapInt64(in.IntDenominatorVal), + StringVal: wrapString(in.StringVal), + BoolVal: wrapBool(in.BoolVal), + Unit: in.Unit, + Desc: in.Desc, + } +} + +// Helper functions for proto wrapping + +func unwrapDouble(w *wrappers.DoubleValue) *float64 { + if w == nil { + return nil + } + + v := w.Value + return &v +} + +func wrapDouble(v *float64) *wrappers.DoubleValue { + if v == nil { + return nil + } + + return &wrappers.DoubleValue{Value: *v} +} + +func unwrapInt64(w *wrappers.Int64Value) *int64 { + if w == nil { + return nil + } + + v := w.Value + return &v +} + +func wrapInt64(v *int64) *wrappers.Int64Value { + if v == nil { + return nil + } + + return &wrappers.Int64Value{Value: *v} +} + +func unwrapString(w *wrappers.StringValue) *string { + if w == nil { + return nil + } + + v := w.Value + return &v +} + +func wrapString(v *string) *wrappers.StringValue { + if v == nil { + return nil + } + + return &wrappers.StringValue{Value: *v} +} + +func unwrapBool(w *wrappers.BoolValue) *bool { + if w == nil { + return nil + } + + v := w.Value + return &v +} + +func wrapBool(v *bool) *wrappers.BoolValue { + if v == nil { + return nil + } + + return &wrappers.BoolValue{Value: *v} +} diff --git a/vendor/github.com/hashicorp/raft/.gitignore b/vendor/github.com/hashicorp/raft/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c 
+_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/raft/.travis.yml b/vendor/github.com/hashicorp/raft/.travis.yml new file mode 100644 index 0000000..9798428 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + # Disabled until https://github.com/armon/go-metrics/issues/59 is fixed + # - 1.6 + - 1.8 + - 1.9 + - tip + +install: make deps +script: + - make integ + +notifications: + flowdock: + secure: fZrcf9rlh2IrQrlch1sHkn3YI7SKvjGnAl/zyV5D6NROe1Bbr6d3QRMuCXWWdhJHzjKmXk5rIzbqJhUc0PNF7YjxGNKSzqWMQ56KcvN1k8DzlqxpqkcA3Jbs6fXCWo2fssRtZ7hj/wOP1f5n6cc7kzHDt9dgaYJ6nO2fqNPJiTc= + diff --git a/vendor/github.com/hashicorp/raft/LICENSE b/vendor/github.com/hashicorp/raft/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/raft/Makefile b/vendor/github.com/hashicorp/raft/Makefile new file mode 100644 index 0000000..75d947f --- /dev/null +++ b/vendor/github.com/hashicorp/raft/Makefile @@ -0,0 +1,20 @@ +DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...) + +test: + go test -timeout=60s . + +integ: test + INTEG_TESTS=yes go test -timeout=25s -run=Integ . + +fuzz: + go test -timeout=300s ./fuzzy + +deps: + go get -d -v ./... + echo $(DEPS) | xargs -n1 go get -d + +cov: + INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html + open /tmp/coverage.html + +.PHONY: test cov integ deps diff --git a/vendor/github.com/hashicorp/raft/README.md b/vendor/github.com/hashicorp/raft/README.md new file mode 100644 index 0000000..43208eb --- /dev/null +++ b/vendor/github.com/hashicorp/raft/README.md @@ -0,0 +1,107 @@ +raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft) +==== + +raft is a [Go](http://www.golang.org) library that manages a replicated +log and can be used with an FSM to manage replicated state machines. 
It +is a library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)). + +The use cases for such a library are far-reaching as replicated state +machines are a key component of many distributed systems. They enable +building Consistent, Partition Tolerant (CP) systems, with limited +fault tolerance as well. + +## Building + +If you wish to build raft you'll need Go version 1.2+ installed. + +Please check your installation with: + +``` +go version +``` + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft). + +To prevent complications with cgo, the primary backend `MDBStore` is in a separate repository, +called [raft-mdb](http://github.com/hashicorp/raft-mdb). That is the recommended implementation +for the `LogStore` and `StableStore`. + +A pure Go backend using [BoltDB](https://github.com/boltdb/bolt) is also available called +[raft-boltdb](https://github.com/hashicorp/raft-boltdb). It can also be used as a `LogStore` +and `StableStore`. + +## Tagged Releases + +As of September 2017, HashiCorp will start using tags for this library to clearly indicate +major version updates. We recommend you vendor your application's dependency on this library. + +* v0.1.0 is the original stable version of the library that was in master and has been maintained +with no breaking API changes. This was in use by Consul prior to version 0.7.0. + +* v1.0.0 takes the changes that were staged in the library-v2-stage-one branch. This version +manages server identities using a UUID, so introduces some breaking API changes. It also versions +the Raft protocol, and requires some special steps when interoperating with Raft servers running +older versions of the library (see the detailed comment in config.go about version compatibility). +You can reference https://github.com/hashicorp/consul/pull/2222 for an idea of what was required +to port Consul to these new interfaces. + + This version includes some new features as well, including non voting servers, a new address + provider abstraction in the transport layer, and more resilient snapshots. + +## Protocol + +raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) + +A high level overview of the Raft protocol is described below, but for details please read the full +[Raft paper](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) +followed by the raft source. Any questions about the raft protocol should be sent to the +[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev). + +### Protocol Description + +Raft nodes are always in one of three states: follower, candidate or leader. All +nodes initially start out as a follower. In this state, nodes can accept log entries +from a leader and cast votes. If no entries are received for some time, nodes +self-promote to the candidate state. In the candidate state nodes request votes from +their peers. If a candidate receives a quorum of votes, then it is promoted to a leader. +The leader must accept new log entries and replicate to all the other followers. +In addition, if stale reads are not acceptable, all queries must also be performed on +the leader. + +Once a cluster has a leader, it is able to accept new log entries. A client can +request that a leader append a new log entry, which is an opaque binary blob to +Raft. 
The leader then writes the entry to durable storage and attempts to replicate +to a quorum of followers. Once the log entry is considered *committed*, it can be +*applied* to a finite state machine. The finite state machine is application specific, +and is implemented using an interface. + +An obvious question relates to the unbounded nature of a replicated log. Raft provides +a mechanism by which the current state is snapshotted, and the log is compacted. Because +of the FSM abstraction, restoring the state of the FSM must result in the same state +as a replay of old logs. This allows Raft to capture the FSM state at a point in time, +and then remove all the logs that were used to reach that state. This is performed automatically +without user intervention, and prevents unbounded disk usage as well as minimizing +time spent replaying logs. + +Lastly, there is the issue of updating the peer set when new servers are joining +or existing servers are leaving. As long as a quorum of nodes is available, this +is not an issue as Raft provides mechanisms to dynamically update the peer set. +If a quorum of nodes is unavailable, then this becomes a very challenging issue. +For example, suppose there are only 2 peers, A and B. The quorum size is also +2, meaning both nodes must agree to commit a log entry. If either A or B fails, +it is now impossible to reach quorum. This means the cluster is unable to add, +or remove a node, or commit any additional log entries. This results in *unavailability*. +At this point, manual intervention would be required to remove either A or B, +and to restart the remaining node in bootstrap mode. + +A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster +of 5 can tolerate 2 node failures. The recommended configuration is to either +run 3 or 5 raft servers. This maximizes availability without +greatly sacrificing performance. + +In terms of performance, Raft is comparable to Paxos. Assuming stable leadership, +committing a log entry requires a single round trip to half of the cluster. +Thus performance is bound by disk I/O and network latency. + diff --git a/vendor/github.com/hashicorp/raft/api.go b/vendor/github.com/hashicorp/raft/api.go new file mode 100644 index 0000000..03a9961 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/api.go @@ -0,0 +1,1008 @@ +package raft + +import ( + "errors" + "fmt" + "io" + "log" + "os" + "strconv" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +var ( + // ErrLeader is returned when an operation can't be completed on a + // leader node. + ErrLeader = errors.New("node is the leader") + + // ErrNotLeader is returned when an operation can't be completed on a + // follower or candidate node. + ErrNotLeader = errors.New("node is not the leader") + + // ErrLeadershipLost is returned when a leader fails to commit a log entry + // because it's been deposed in the process. + ErrLeadershipLost = errors.New("leadership lost while committing log") + + // ErrAbortedByRestore is returned when a leader fails to commit a log + // entry because it's been superseded by a user snapshot restore. + ErrAbortedByRestore = errors.New("snapshot restored while committing log") + + // ErrRaftShutdown is returned when operations are requested against an + // inactive Raft. + ErrRaftShutdown = errors.New("raft is already shutdown") + + // ErrEnqueueTimeout is returned when a command fails due to a timeout. 
+ ErrEnqueueTimeout = errors.New("timed out enqueuing operation") + + // ErrNothingNewToSnapshot is returned when trying to create a snapshot + // but there's nothing new commited to the FSM since we started. + ErrNothingNewToSnapshot = errors.New("nothing new to snapshot") + + // ErrUnsupportedProtocol is returned when an operation is attempted + // that's not supported by the current protocol version. + ErrUnsupportedProtocol = errors.New("operation not supported with current protocol version") + + // ErrCantBootstrap is returned when attempt is made to bootstrap a + // cluster that already has state present. + ErrCantBootstrap = errors.New("bootstrap only works on new clusters") +) + +// Raft implements a Raft node. +type Raft struct { + raftState + + // protocolVersion is used to inter-operate with Raft servers running + // different versions of the library. See comments in config.go for more + // details. + protocolVersion ProtocolVersion + + // applyCh is used to async send logs to the main thread to + // be committed and applied to the FSM. + applyCh chan *logFuture + + // Configuration provided at Raft initialization + conf Config + + // FSM is the client state machine to apply commands to + fsm FSM + + // fsmMutateCh is used to send state-changing updates to the FSM. This + // receives pointers to commitTuple structures when applying logs or + // pointers to restoreFuture structures when restoring a snapshot. We + // need control over the order of these operations when doing user + // restores so that we finish applying any old log applies before we + // take a user snapshot on the leader, otherwise we might restore the + // snapshot and apply old logs to it that were in the pipe. + fsmMutateCh chan interface{} + + // fsmSnapshotCh is used to trigger a new snapshot being taken + fsmSnapshotCh chan *reqSnapshotFuture + + // lastContact is the last time we had contact from the + // leader node. This can be used to gauge staleness. + lastContact time.Time + lastContactLock sync.RWMutex + + // Leader is the current cluster leader + leader ServerAddress + leaderLock sync.RWMutex + + // leaderCh is used to notify of leadership changes + leaderCh chan bool + + // leaderState used only while state is leader + leaderState leaderState + + // Stores our local server ID, used to avoid sending RPCs to ourself + localID ServerID + + // Stores our local addr + localAddr ServerAddress + + // Used for our logging + logger *log.Logger + + // LogStore provides durable storage for logs + logs LogStore + + // Used to request the leader to make configuration changes. + configurationChangeCh chan *configurationChangeFuture + + // Tracks the latest configuration and latest committed configuration from + // the log/snapshot. 
+ configurations configurations + + // RPC chan comes from the transport layer + rpcCh <-chan RPC + + // Shutdown channel to exit, protected to prevent concurrent exits + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + // snapshots is used to store and retrieve snapshots + snapshots SnapshotStore + + // userSnapshotCh is used for user-triggered snapshots + userSnapshotCh chan *userSnapshotFuture + + // userRestoreCh is used for user-triggered restores of external + // snapshots + userRestoreCh chan *userRestoreFuture + + // stable is a StableStore implementation for durable state + // It provides stable storage for many fields in raftState + stable StableStore + + // The transport layer we use + trans Transport + + // verifyCh is used to async send verify futures to the main thread + // to verify we are still the leader + verifyCh chan *verifyFuture + + // configurationsCh is used to get the configuration data safely from + // outside of the main thread. + configurationsCh chan *configurationsFuture + + // bootstrapCh is used to attempt an initial bootstrap from outside of + // the main thread. + bootstrapCh chan *bootstrapFuture + + // List of observers and the mutex that protects them. The observers list + // is indexed by an artificial ID which is used for deregistration. + observersLock sync.RWMutex + observers map[uint64]*Observer +} + +// BootstrapCluster initializes a server's storage with the given cluster +// configuration. This should only be called at the beginning of time for the +// cluster, and you absolutely must make sure that you call it with the same +// configuration on all the Voter servers. There is no need to bootstrap +// Nonvoter and Staging servers. +// +// One sane approach is to bootstrap a single server with a configuration +// listing just itself as a Voter, then invoke AddVoter() on it to add other +// servers to the cluster. +func BootstrapCluster(conf *Config, logs LogStore, stable StableStore, + snaps SnapshotStore, trans Transport, configuration Configuration) error { + // Validate the Raft server config. + if err := ValidateConfig(conf); err != nil { + return err + } + + // Sanity check the Raft peer configuration. + if err := checkConfiguration(configuration); err != nil { + return err + } + + // Make sure the cluster is in a clean state. + hasState, err := HasExistingState(logs, stable, snaps) + if err != nil { + return fmt.Errorf("failed to check for existing state: %v", err) + } + if hasState { + return ErrCantBootstrap + } + + // Set current term to 1. + if err := stable.SetUint64(keyCurrentTerm, 1); err != nil { + return fmt.Errorf("failed to save current term: %v", err) + } + + // Append configuration entry to log. + entry := &Log{ + Index: 1, + Term: 1, + } + if conf.ProtocolVersion < 3 { + entry.Type = LogRemovePeerDeprecated + entry.Data = encodePeers(configuration, trans) + } else { + entry.Type = LogConfiguration + entry.Data = encodeConfiguration(configuration) + } + if err := logs.StoreLog(entry); err != nil { + return fmt.Errorf("failed to append configuration entry to log: %v", err) + } + + return nil +} + +// RecoverCluster is used to manually force a new configuration in order to +// recover from a loss of quorum where the current configuration cannot be +// restored, such as when several servers die at the same time. This works by +// reading all the current state for this server, creating a snapshot with the +// supplied configuration, and then truncating the Raft log. 
This is the only +// safe way to force a given configuration without actually altering the log to +// insert any new entries, which could cause conflicts with other servers with +// different state. +// +// WARNING! This operation implicitly commits all entries in the Raft log, so +// in general this is an extremely unsafe operation. If you've lost your other +// servers and are performing a manual recovery, then you've also lost the +// commit information, so this is likely the best you can do, but you should be +// aware that calling this can cause Raft log entries that were in the process +// of being replicated but not yet be committed to be committed. +// +// Note the FSM passed here is used for the snapshot operations and will be +// left in a state that should not be used by the application. Be sure to +// discard this FSM and any associated state and provide a fresh one when +// calling NewRaft later. +// +// A typical way to recover the cluster is to shut down all servers and then +// run RecoverCluster on every server using an identical configuration. When +// the cluster is then restarted, and election should occur and then Raft will +// resume normal operation. If it's desired to make a particular server the +// leader, this can be used to inject a new configuration with that server as +// the sole voter, and then join up other new clean-state peer servers using +// the usual APIs in order to bring the cluster back into a known state. +func RecoverCluster(conf *Config, fsm FSM, logs LogStore, stable StableStore, + snaps SnapshotStore, trans Transport, configuration Configuration) error { + // Validate the Raft server config. + if err := ValidateConfig(conf); err != nil { + return err + } + + // Sanity check the Raft peer configuration. + if err := checkConfiguration(configuration); err != nil { + return err + } + + // Refuse to recover if there's no existing state. This would be safe to + // do, but it is likely an indication of an operator error where they + // expect data to be there and it's not. By refusing, we force them + // to show intent to start a cluster fresh by explicitly doing a + // bootstrap, rather than quietly fire up a fresh cluster here. + hasState, err := HasExistingState(logs, stable, snaps) + if err != nil { + return fmt.Errorf("failed to check for existing state: %v", err) + } + if !hasState { + return fmt.Errorf("refused to recover cluster with no initial state, this is probably an operator error") + } + + // Attempt to restore any snapshots we find, newest to oldest. + var snapshotIndex uint64 + var snapshotTerm uint64 + snapshots, err := snaps.List() + if err != nil { + return fmt.Errorf("failed to list snapshots: %v", err) + } + for _, snapshot := range snapshots { + _, source, err := snaps.Open(snapshot.ID) + if err != nil { + // Skip this one and try the next. We will detect if we + // couldn't open any snapshots. + continue + } + defer source.Close() + + if err := fsm.Restore(source); err != nil { + // Same here, skip and try the next one. + continue + } + + snapshotIndex = snapshot.Index + snapshotTerm = snapshot.Term + break + } + if len(snapshots) > 0 && (snapshotIndex == 0 || snapshotTerm == 0) { + return fmt.Errorf("failed to restore any of the available snapshots") + } + + // The snapshot information is the best known end point for the data + // until we play back the Raft log entries. + lastIndex := snapshotIndex + lastTerm := snapshotTerm + + // Apply any Raft log entries past the snapshot. 
+ lastLogIndex, err := logs.LastIndex() + if err != nil { + return fmt.Errorf("failed to find last log: %v", err) + } + for index := snapshotIndex + 1; index <= lastLogIndex; index++ { + var entry Log + if err := logs.GetLog(index, &entry); err != nil { + return fmt.Errorf("failed to get log at index %d: %v", index, err) + } + if entry.Type == LogCommand { + _ = fsm.Apply(&entry) + } + lastIndex = entry.Index + lastTerm = entry.Term + } + + // Create a new snapshot, placing the configuration in as if it was + // committed at index 1. + snapshot, err := fsm.Snapshot() + if err != nil { + return fmt.Errorf("failed to snapshot FSM: %v", err) + } + version := getSnapshotVersion(conf.ProtocolVersion) + sink, err := snaps.Create(version, lastIndex, lastTerm, configuration, 1, trans) + if err != nil { + return fmt.Errorf("failed to create snapshot: %v", err) + } + if err := snapshot.Persist(sink); err != nil { + return fmt.Errorf("failed to persist snapshot: %v", err) + } + if err := sink.Close(); err != nil { + return fmt.Errorf("failed to finalize snapshot: %v", err) + } + + // Compact the log so that we don't get bad interference from any + // configuration change log entries that might be there. + firstLogIndex, err := logs.FirstIndex() + if err != nil { + return fmt.Errorf("failed to get first log index: %v", err) + } + if err := logs.DeleteRange(firstLogIndex, lastLogIndex); err != nil { + return fmt.Errorf("log compaction failed: %v", err) + } + + return nil +} + +// HasExistingState returns true if the server has any existing state (logs, +// knowledge of a current term, or any snapshots). +func HasExistingState(logs LogStore, stable StableStore, snaps SnapshotStore) (bool, error) { + // Make sure we don't have a current term. + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err == nil { + if currentTerm > 0 { + return true, nil + } + } else { + if err.Error() != "not found" { + return false, fmt.Errorf("failed to read current term: %v", err) + } + } + + // Make sure we have an empty log. + lastIndex, err := logs.LastIndex() + if err != nil { + return false, fmt.Errorf("failed to get last log index: %v", err) + } + if lastIndex > 0 { + return true, nil + } + + // Make sure we have no snapshots + snapshots, err := snaps.List() + if err != nil { + return false, fmt.Errorf("failed to list snapshots: %v", err) + } + if len(snapshots) > 0 { + return true, nil + } + + return false, nil +} + +// NewRaft is used to construct a new Raft node. It takes a configuration, as well +// as implementations of various interfaces that are required. If we have any +// old state, such as snapshots, logs, peers, etc, all those will be restored +// when creating the Raft node. +func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, trans Transport) (*Raft, error) { + // Validate the configuration. + if err := ValidateConfig(conf); err != nil { + return nil, err + } + + // Ensure we have a LogOutput. + var logger *log.Logger + if conf.Logger != nil { + logger = conf.Logger + } else { + if conf.LogOutput == nil { + conf.LogOutput = os.Stderr + } + logger = log.New(conf.LogOutput, "", log.LstdFlags) + } + + // Try to restore the current term. + currentTerm, err := stable.GetUint64(keyCurrentTerm) + if err != nil && err.Error() != "not found" { + return nil, fmt.Errorf("failed to load current term: %v", err) + } + + // Read the index of the last log entry. 
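// Editor's sketch (not part of the vendored file): the documented way to use
// RecoverCluster is to stop every server, run it on each one with an identical
// configuration, and then restart the servers normally. The helper below only
// wraps that call; the stores, transport, and server list are assumed to be
// whatever the application normally opens, and freshFSM must be thrown away
// afterwards, per the warning in the RecoverCluster comment.
package raftsketch

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func recoverNode(conf *raft.Config, freshFSM raft.FSM,
	logs raft.LogStore, stable raft.StableStore,
	snaps raft.SnapshotStore, trans raft.Transport,
	servers []raft.Server) error {

	configuration := raft.Configuration{Servers: servers}
	if err := raft.RecoverCluster(conf, freshFSM, logs, stable, snaps, trans, configuration); err != nil {
		return fmt.Errorf("recover cluster: %v", err)
	}
	// Discard freshFSM now and start the node again with NewRaft and a
	// brand-new FSM instance.
	return nil
}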
+ lastIndex, err := logs.LastIndex() + if err != nil { + return nil, fmt.Errorf("failed to find last log: %v", err) + } + + // Get the last log entry. + var lastLog Log + if lastIndex > 0 { + if err = logs.GetLog(lastIndex, &lastLog); err != nil { + return nil, fmt.Errorf("failed to get last log at index %d: %v", lastIndex, err) + } + } + + // Make sure we have a valid server address and ID. + protocolVersion := conf.ProtocolVersion + localAddr := ServerAddress(trans.LocalAddr()) + localID := conf.LocalID + + // TODO (slackpad) - When we deprecate protocol version 2, remove this + // along with the AddPeer() and RemovePeer() APIs. + if protocolVersion < 3 && string(localID) != string(localAddr) { + return nil, fmt.Errorf("when running with ProtocolVersion < 3, LocalID must be set to the network address") + } + + // Create Raft struct. + r := &Raft{ + protocolVersion: protocolVersion, + applyCh: make(chan *logFuture), + conf: *conf, + fsm: fsm, + fsmMutateCh: make(chan interface{}, 128), + fsmSnapshotCh: make(chan *reqSnapshotFuture), + leaderCh: make(chan bool), + localID: localID, + localAddr: localAddr, + logger: logger, + logs: logs, + configurationChangeCh: make(chan *configurationChangeFuture), + configurations: configurations{}, + rpcCh: trans.Consumer(), + snapshots: snaps, + userSnapshotCh: make(chan *userSnapshotFuture), + userRestoreCh: make(chan *userRestoreFuture), + shutdownCh: make(chan struct{}), + stable: stable, + trans: trans, + verifyCh: make(chan *verifyFuture, 64), + configurationsCh: make(chan *configurationsFuture, 8), + bootstrapCh: make(chan *bootstrapFuture), + observers: make(map[uint64]*Observer), + } + + // Initialize as a follower. + r.setState(Follower) + + // Start as leader if specified. This should only be used + // for testing purposes. + if conf.StartAsLeader { + r.setState(Leader) + r.setLeader(r.localAddr) + } + + // Restore the current term and the last log. + r.setCurrentTerm(currentTerm) + r.setLastLog(lastLog.Index, lastLog.Term) + + // Attempt to restore a snapshot if there are any. + if err := r.restoreSnapshot(); err != nil { + return nil, err + } + + // Scan through the log for any configuration change entries. + snapshotIndex, _ := r.getLastSnapshot() + for index := snapshotIndex + 1; index <= lastLog.Index; index++ { + var entry Log + if err := r.logs.GetLog(index, &entry); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", index, err) + panic(err) + } + r.processConfigurationLogEntry(&entry) + } + + r.logger.Printf("[INFO] raft: Initial configuration (index=%d): %+v", + r.configurations.latestIndex, r.configurations.latest.Servers) + + // Setup a heartbeat fast-path to avoid head-of-line + // blocking where possible. It MUST be safe for this + // to be called concurrently with a blocking RPC. + trans.SetHeartbeatHandler(r.processHeartbeat) + + // Start the background work. + r.goFunc(r.run) + r.goFunc(r.runFSM) + r.goFunc(r.runSnapshots) + return r, nil +} + +// restoreSnapshot attempts to restore the latest snapshots, and fails if none +// of them can be restored. This is called at initialization time, and is +// completely unsafe to call at any other time. 
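// Editor's sketch (not part of the vendored file): wiring BootstrapCluster and
// NewRaft together for a single-voter development node. The in-memory helpers
// (NewInmemStore, NewInmemSnapshotStore, NewInmemTransport) are defined
// elsewhere in this package, outside this hunk; the FSM is supplied by the
// caller (for example, the key/value sketch earlier).
package raftsketch

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func newDevNode(fsm raft.FSM) (*raft.Raft, error) {
	conf := raft.DefaultConfig()
	conf.LocalID = raft.ServerID("node-1")

	logs := raft.NewInmemStore()   // LogStore
	stable := raft.NewInmemStore() // StableStore
	snaps := raft.NewInmemSnapshotStore()
	addr, trans := raft.NewInmemTransport("")

	// A single Voter: this node. BootstrapCluster refuses to run if any
	// prior state exists (ErrCantBootstrap), so this happens exactly once.
	configuration := raft.Configuration{
		Servers: []raft.Server{
			{Suffrage: raft.Voter, ID: conf.LocalID, Address: addr},
		},
	}
	if err := raft.BootstrapCluster(conf, logs, stable, snaps, trans, configuration); err != nil {
		return nil, fmt.Errorf("bootstrap: %v", err)
	}
	return raft.NewRaft(conf, fsm, logs, stable, snaps, trans)
}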
+func (r *Raft) restoreSnapshot() error { + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + return err + } + + // Try to load in order of newest to oldest + for _, snapshot := range snapshots { + _, source, err := r.snapshots.Open(snapshot.ID) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapshot.ID, err) + continue + } + defer source.Close() + + if err := r.fsm.Restore(source); err != nil { + r.logger.Printf("[ERR] raft: Failed to restore snapshot %v: %v", snapshot.ID, err) + continue + } + + // Log success + r.logger.Printf("[INFO] raft: Restored from snapshot %v", snapshot.ID) + + // Update the lastApplied so we don't replay old logs + r.setLastApplied(snapshot.Index) + + // Update the last stable snapshot info + r.setLastSnapshot(snapshot.Index, snapshot.Term) + + // Update the configuration + if snapshot.Version > 0 { + r.configurations.committed = snapshot.Configuration + r.configurations.committedIndex = snapshot.ConfigurationIndex + r.configurations.latest = snapshot.Configuration + r.configurations.latestIndex = snapshot.ConfigurationIndex + } else { + configuration := decodePeers(snapshot.Peers, r.trans) + r.configurations.committed = configuration + r.configurations.committedIndex = snapshot.Index + r.configurations.latest = configuration + r.configurations.latestIndex = snapshot.Index + } + + // Success! + return nil + } + + // If we had snapshots and failed to load them, its an error + if len(snapshots) > 0 { + return fmt.Errorf("failed to load any existing snapshots") + } + return nil +} + +// BootstrapCluster is equivalent to non-member BootstrapCluster but can be +// called on an un-bootstrapped Raft instance after it has been created. This +// should only be called at the beginning of time for the cluster, and you +// absolutely must make sure that you call it with the same configuration on all +// the Voter servers. There is no need to bootstrap Nonvoter and Staging +// servers. +func (r *Raft) BootstrapCluster(configuration Configuration) Future { + bootstrapReq := &bootstrapFuture{} + bootstrapReq.init() + bootstrapReq.configuration = configuration + select { + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.bootstrapCh <- bootstrapReq: + return bootstrapReq + } +} + +// Leader is used to return the current leader of the cluster. +// It may return empty string if there is no current leader +// or the leader is unknown. +func (r *Raft) Leader() ServerAddress { + r.leaderLock.RLock() + leader := r.leader + r.leaderLock.RUnlock() + return leader +} + +// Apply is used to apply a command to the FSM in a highly consistent +// manner. This returns a future that can be used to wait on the application. +// An optional timeout can be provided to limit the amount of time we wait +// for the command to be started. This must be run on the leader or it +// will fail. 
+func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture { + metrics.IncrCounter([]string{"raft", "apply"}, 1) + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + + // Create a log future, no index or term yet + logFuture := &logFuture{ + log: Log{ + Type: LogCommand, + Data: cmd, + }, + } + logFuture.init() + + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.applyCh <- logFuture: + return logFuture + } +} + +// Barrier is used to issue a command that blocks until all preceeding +// operations have been applied to the FSM. It can be used to ensure the +// FSM reflects all queued writes. An optional timeout can be provided to +// limit the amount of time we wait for the command to be started. This +// must be run on the leader or it will fail. +func (r *Raft) Barrier(timeout time.Duration) Future { + metrics.IncrCounter([]string{"raft", "barrier"}, 1) + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + + // Create a log future, no index or term yet + logFuture := &logFuture{ + log: Log{ + Type: LogBarrier, + }, + } + logFuture.init() + + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.applyCh <- logFuture: + return logFuture + } +} + +// VerifyLeader is used to ensure the current node is still +// the leader. This can be done to prevent stale reads when a +// new leader has potentially been elected. +func (r *Raft) VerifyLeader() Future { + metrics.IncrCounter([]string{"raft", "verify_leader"}, 1) + verifyFuture := &verifyFuture{} + verifyFuture.init() + select { + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + case r.verifyCh <- verifyFuture: + return verifyFuture + } +} + +// GetConfiguration returns the latest configuration and its associated index +// currently in use. This may not yet be committed. This must not be called on +// the main thread (which can access the information directly). +func (r *Raft) GetConfiguration() ConfigurationFuture { + configReq := &configurationsFuture{} + configReq.init() + select { + case <-r.shutdownCh: + configReq.respond(ErrRaftShutdown) + return configReq + case r.configurationsCh <- configReq: + return configReq + } +} + +// AddPeer (deprecated) is used to add a new peer into the cluster. This must be +// run on the leader or it will fail. Use AddVoter/AddNonvoter instead. +func (r *Raft) AddPeer(peer ServerAddress) Future { + if r.protocolVersion > 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddStaging, + serverID: ServerID(peer), + serverAddress: peer, + prevIndex: 0, + }, 0) +} + +// RemovePeer (deprecated) is used to remove a peer from the cluster. If the +// current leader is being removed, it will cause a new election +// to occur. This must be run on the leader or it will fail. +// Use RemoveServer instead. +func (r *Raft) RemovePeer(peer ServerAddress) Future { + if r.protocolVersion > 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: RemoveServer, + serverID: ServerID(peer), + prevIndex: 0, + }, 0) +} + +// AddVoter will add the given server to the cluster as a staging server. If the +// server is already in the cluster as a voter, this updates the server's address. +// This must be run on the leader or it will fail. 
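// Editor's sketch (not part of the vendored file): proposing a command with
// Apply and interpreting the common error values declared at the top of this
// file. The JSON command shape matches the hypothetical key/value FSM sketch
// earlier; function names and the timeout are placeholders.
package raftsketch

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/raft"
)

func submitSet(r *raft.Raft, key, value string) error {
	cmd, err := json.Marshal(map[string]string{key: value})
	if err != nil {
		return err
	}

	// Apply returns a future immediately; Error() blocks until the entry is
	// committed and applied, or the attempt fails.
	f := r.Apply(cmd, 500*time.Millisecond)
	if err := f.Error(); err != nil {
		switch err {
		case raft.ErrNotLeader:
			return fmt.Errorf("not the leader, current leader is %q: %v", r.Leader(), err)
		case raft.ErrEnqueueTimeout:
			// Never enqueued, so retrying cannot double-apply.
			return fmt.Errorf("timed out enqueuing command, retry later: %v", err)
		case raft.ErrLeadershipLost:
			// Outcome unknown: the entry may or may not still commit.
			return fmt.Errorf("leadership lost while committing: %v", err)
		default:
			return err
		}
	}

	// Response() carries whatever the FSM's Apply returned for this entry.
	if resp, ok := f.Response().(error); ok {
		return resp
	}
	return nil
}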
The leader will promote the +// staging server to a voter once that server is ready. If nonzero, prevIndex is +// the index of the only configuration upon which this change may be applied; if +// another configuration entry has been added in the meantime, this request will +// fail. If nonzero, timeout is how long this server should wait before the +// configuration change log entry is appended. +func (r *Raft) AddVoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddStaging, + serverID: id, + serverAddress: address, + prevIndex: prevIndex, + }, timeout) +} + +// AddNonvoter will add the given server to the cluster but won't assign it a +// vote. The server will receive log entries, but it won't participate in +// elections or log entry commitment. If the server is already in the cluster, +// this updates the server's address. This must be run on the leader or it will +// fail. For prevIndex and timeout, see AddVoter. +func (r *Raft) AddNonvoter(id ServerID, address ServerAddress, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 3 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: AddNonvoter, + serverID: id, + serverAddress: address, + prevIndex: prevIndex, + }, timeout) +} + +// RemoveServer will remove the given server from the cluster. If the current +// leader is being removed, it will cause a new election to occur. This must be +// run on the leader or it will fail. For prevIndex and timeout, see AddVoter. +func (r *Raft) RemoveServer(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 2 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: RemoveServer, + serverID: id, + prevIndex: prevIndex, + }, timeout) +} + +// DemoteVoter will take away a server's vote, if it has one. If present, the +// server will continue to receive log entries, but it won't participate in +// elections or log entry commitment. If the server is not in the cluster, this +// does nothing. This must be run on the leader or it will fail. For prevIndex +// and timeout, see AddVoter. +func (r *Raft) DemoteVoter(id ServerID, prevIndex uint64, timeout time.Duration) IndexFuture { + if r.protocolVersion < 3 { + return errorFuture{ErrUnsupportedProtocol} + } + + return r.requestConfigChange(configurationChangeRequest{ + command: DemoteVoter, + serverID: id, + prevIndex: prevIndex, + }, timeout) +} + +// Shutdown is used to stop the Raft background routines. +// This is not a graceful operation. Provides a future that +// can be used to block until all background routines have exited. +func (r *Raft) Shutdown() Future { + r.shutdownLock.Lock() + defer r.shutdownLock.Unlock() + + if !r.shutdown { + close(r.shutdownCh) + r.shutdown = true + r.setState(Shutdown) + return &shutdownFuture{r} + } + + // avoid closing transport twice + return &shutdownFuture{nil} +} + +// Snapshot is used to manually force Raft to take a snapshot. Returns a future +// that can be used to block until complete, and that contains a function that +// can be used to open the snapshot. 
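// Editor's sketch (not part of the vendored file): a guarded membership change
// that passes GetConfiguration's index as prevIndex, so the AddVoter is
// rejected if the configuration changed concurrently. newID, newAddr and the
// 10s timeout are placeholders.
package raftsketch

import (
	"time"

	"github.com/hashicorp/raft"
)

func addVoterGuarded(r *raft.Raft, newID raft.ServerID, newAddr raft.ServerAddress) error {
	cf := r.GetConfiguration()
	if err := cf.Error(); err != nil {
		return err
	}
	return r.AddVoter(newID, newAddr, cf.Index(), 10*time.Second).Error()
}

// Removal works the same way; as the comments above note, removing the
// current leader triggers a new election.
func removeServer(r *raft.Raft, id raft.ServerID) error {
	return r.RemoveServer(id, 0, 10*time.Second).Error()
}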
+func (r *Raft) Snapshot() SnapshotFuture { + future := &userSnapshotFuture{} + future.init() + select { + case r.userSnapshotCh <- future: + return future + case <-r.shutdownCh: + future.respond(ErrRaftShutdown) + return future + } +} + +// Restore is used to manually force Raft to consume an external snapshot, such +// as if restoring from a backup. We will use the current Raft configuration, +// not the one from the snapshot, so that we can restore into a new cluster. We +// will also use the higher of the index of the snapshot, or the current index, +// and then add 1 to that, so we force a new state with a hole in the Raft log, +// so that the snapshot will be sent to followers and used for any new joiners. +// This can only be run on the leader, and blocks until the restore is complete +// or an error occurs. +// +// WARNING! This operation has the leader take on the state of the snapshot and +// then sets itself up so that it replicates that to its followers though the +// install snapshot process. This involves a potentially dangerous period where +// the leader commits ahead of its followers, so should only be used for disaster +// recovery into a fresh cluster, and should not be used in normal operations. +func (r *Raft) Restore(meta *SnapshotMeta, reader io.Reader, timeout time.Duration) error { + metrics.IncrCounter([]string{"raft", "restore"}, 1) + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + + // Perform the restore. + restore := &userRestoreFuture{ + meta: meta, + reader: reader, + } + restore.init() + select { + case <-timer: + return ErrEnqueueTimeout + case <-r.shutdownCh: + return ErrRaftShutdown + case r.userRestoreCh <- restore: + // If the restore is ingested then wait for it to complete. + if err := restore.Error(); err != nil { + return err + } + } + + // Apply a no-op log entry. Waiting for this allows us to wait until the + // followers have gotten the restore and replicated at least this new + // entry, which shows that we've also faulted and installed the + // snapshot with the contents of the restore. + noop := &logFuture{ + log: Log{ + Type: LogNoop, + }, + } + noop.init() + select { + case <-timer: + return ErrEnqueueTimeout + case <-r.shutdownCh: + return ErrRaftShutdown + case r.applyCh <- noop: + return noop.Error() + } +} + +// State is used to return the current raft state. +func (r *Raft) State() RaftState { + return r.getState() +} + +// LeaderCh is used to get a channel which delivers signals on +// acquiring or losing leadership. It sends true if we become +// the leader, and false if we lose it. The channel is not buffered, +// and does not block on writes. +func (r *Raft) LeaderCh() <-chan bool { + return r.leaderCh +} + +// String returns a string representation of this Raft node. +func (r *Raft) String() string { + return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState()) +} + +// LastContact returns the time of last contact by a leader. +// This only makes sense if we are currently a follower. +func (r *Raft) LastContact() time.Time { + r.lastContactLock.RLock() + last := r.lastContact + r.lastContactLock.RUnlock() + return last +} + +// Stats is used to return a map of various internal stats. This +// should only be used for informative purposes or debugging. +// +// Keys are: "state", "term", "last_log_index", "last_log_term", +// "commit_index", "applied_index", "fsm_pending", +// "last_snapshot_index", "last_snapshot_term", +// "latest_configuration", "last_contact", and "num_peers". 
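// Editor's sketch (not part of the vendored file): forcing a user-triggered
// snapshot and waiting on the returned future. Treating
// ErrNothingNewToSnapshot as a non-fatal outcome is an editorial choice; the
// future also carries a function to open the snapshot contents, per the
// comment above, but only Error() is used here.
package raftsketch

import "github.com/hashicorp/raft"

func takeUserSnapshot(r *raft.Raft) error {
	f := r.Snapshot()
	if err := f.Error(); err != nil {
		if err == raft.ErrNothingNewToSnapshot {
			return nil // nothing committed since the last snapshot
		}
		return err
	}
	return nil
}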
+// +// The value of "state" is a numerical value representing a +// RaftState const. +// +// The value of "latest_configuration" is a string which contains +// the id of each server, its suffrage status, and its address. +// +// The value of "last_contact" is either "never" if there +// has been no contact with a leader, "0" if the node is in the +// leader state, or the time since last contact with a leader +// formatted as a string. +// +// The value of "num_peers" is the number of other voting servers in the +// cluster, not including this node. If this node isn't part of the +// configuration then this will be "0". +// +// All other values are uint64s, formatted as strings. +func (r *Raft) Stats() map[string]string { + toString := func(v uint64) string { + return strconv.FormatUint(v, 10) + } + lastLogIndex, lastLogTerm := r.getLastLog() + lastSnapIndex, lastSnapTerm := r.getLastSnapshot() + s := map[string]string{ + "state": r.getState().String(), + "term": toString(r.getCurrentTerm()), + "last_log_index": toString(lastLogIndex), + "last_log_term": toString(lastLogTerm), + "commit_index": toString(r.getCommitIndex()), + "applied_index": toString(r.getLastApplied()), + "fsm_pending": toString(uint64(len(r.fsmMutateCh))), + "last_snapshot_index": toString(lastSnapIndex), + "last_snapshot_term": toString(lastSnapTerm), + "protocol_version": toString(uint64(r.protocolVersion)), + "protocol_version_min": toString(uint64(ProtocolVersionMin)), + "protocol_version_max": toString(uint64(ProtocolVersionMax)), + "snapshot_version_min": toString(uint64(SnapshotVersionMin)), + "snapshot_version_max": toString(uint64(SnapshotVersionMax)), + } + + future := r.GetConfiguration() + if err := future.Error(); err != nil { + r.logger.Printf("[WARN] raft: could not get configuration for Stats: %v", err) + } else { + configuration := future.Configuration() + s["latest_configuration_index"] = toString(future.Index()) + s["latest_configuration"] = fmt.Sprintf("%+v", configuration.Servers) + + // This is a legacy metric that we've seen people use in the wild. + hasUs := false + numPeers := 0 + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + if server.ID == r.localID { + hasUs = true + } else { + numPeers++ + } + } + } + if !hasUs { + numPeers = 0 + } + s["num_peers"] = toString(uint64(numPeers)) + } + + last := r.LastContact() + if r.getState() == Leader { + s["last_contact"] = "0" + } else if last.IsZero() { + s["last_contact"] = "never" + } else { + s["last_contact"] = fmt.Sprintf("%v", time.Now().Sub(last)) + } + return s +} + +// LastIndex returns the last index in stable storage, +// either from the last log or from the last snapshot. +func (r *Raft) LastIndex() uint64 { + return r.getLastIndex() +} + +// AppliedIndex returns the last index applied to the FSM. This is generally +// lagging behind the last index, especially for indexes that are persisted but +// have not yet been considered committed by the leader. NOTE - this reflects +// the last index that was sent to the application's FSM over the apply channel +// but DOES NOT mean that the application's FSM has yet consumed it and applied +// it to its internal state. Thus, the application's state may lag behind this +// index. 
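// Editor's sketch (not part of the vendored file): watching LeaderCh and
// periodically logging a few of the Stats() keys documented above. The
// interval and the chosen keys are arbitrary.
package raftsketch

import (
	"log"
	"time"

	"github.com/hashicorp/raft"
)

func monitor(r *raft.Raft, stop <-chan struct{}) {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case isLeader := <-r.LeaderCh():
			log.Printf("[INFO] leadership changed: leader=%v", isLeader)
		case <-ticker.C:
			s := r.Stats()
			log.Printf("[INFO] raft: state=%s term=%s commit_index=%s applied_index=%s last_contact=%s",
				s["state"], s["term"], s["commit_index"], s["applied_index"], s["last_contact"])
		case <-stop:
			return
		}
	}
}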
+func (r *Raft) AppliedIndex() uint64 { + return r.getLastApplied() +} diff --git a/vendor/github.com/hashicorp/raft/commands.go b/vendor/github.com/hashicorp/raft/commands.go new file mode 100644 index 0000000..5d89e7b --- /dev/null +++ b/vendor/github.com/hashicorp/raft/commands.go @@ -0,0 +1,151 @@ +package raft + +// RPCHeader is a common sub-structure used to pass along protocol version and +// other information about the cluster. For older Raft implementations before +// versioning was added this will default to a zero-valued structure when read +// by newer Raft versions. +type RPCHeader struct { + // ProtocolVersion is the version of the protocol the sender is + // speaking. + ProtocolVersion ProtocolVersion +} + +// WithRPCHeader is an interface that exposes the RPC header. +type WithRPCHeader interface { + GetRPCHeader() RPCHeader +} + +// AppendEntriesRequest is the command used to append entries to the +// replicated log. +type AppendEntriesRequest struct { + RPCHeader + + // Provide the current term and leader + Term uint64 + Leader []byte + + // Provide the previous entries for integrity checking + PrevLogEntry uint64 + PrevLogTerm uint64 + + // New entries to commit + Entries []*Log + + // Commit index on the leader + LeaderCommitIndex uint64 +} + +// See WithRPCHeader. +func (r *AppendEntriesRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// AppendEntriesResponse is the response returned from an +// AppendEntriesRequest. +type AppendEntriesResponse struct { + RPCHeader + + // Newer term if leader is out of date + Term uint64 + + // Last Log is a hint to help accelerate rebuilding slow nodes + LastLog uint64 + + // We may not succeed if we have a conflicting entry + Success bool + + // There are scenarios where this request didn't succeed + // but there's no need to wait/back-off the next attempt. + NoRetryBackoff bool +} + +// See WithRPCHeader. +func (r *AppendEntriesResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestVoteRequest is the command used by a candidate to ask a Raft peer +// for a vote in an election. +type RequestVoteRequest struct { + RPCHeader + + // Provide the term and our id + Term uint64 + Candidate []byte + + // Used to ensure safety + LastLogIndex uint64 + LastLogTerm uint64 +} + +// See WithRPCHeader. +func (r *RequestVoteRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestVoteResponse is the response returned from a RequestVoteRequest. +type RequestVoteResponse struct { + RPCHeader + + // Newer term if leader is out of date. + Term uint64 + + // Peers is deprecated, but required by servers that only understand + // protocol version 0. This is not populated in protocol version 2 + // and later. + Peers []byte + + // Is the vote granted. + Granted bool +} + +// See WithRPCHeader. +func (r *RequestVoteResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its +// log (and state machine) from a snapshot on another peer. +type InstallSnapshotRequest struct { + RPCHeader + SnapshotVersion SnapshotVersion + + Term uint64 + Leader []byte + + // These are the last index/term included in the snapshot + LastLogIndex uint64 + LastLogTerm uint64 + + // Peer Set in the snapshot. This is deprecated in favor of Configuration + // but remains here in case we receive an InstallSnapshot from a leader + // that's running old code. + Peers []byte + + // Cluster membership. 
+ Configuration []byte + // Log index where 'Configuration' entry was originally written. + ConfigurationIndex uint64 + + // Size of the snapshot + Size int64 +} + +// See WithRPCHeader. +func (r *InstallSnapshotRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// InstallSnapshotResponse is the response returned from an +// InstallSnapshotRequest. +type InstallSnapshotResponse struct { + RPCHeader + + Term uint64 + Success bool +} + +// See WithRPCHeader. +func (r *InstallSnapshotResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} diff --git a/vendor/github.com/hashicorp/raft/commitment.go b/vendor/github.com/hashicorp/raft/commitment.go new file mode 100644 index 0000000..7aa3646 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/commitment.go @@ -0,0 +1,101 @@ +package raft + +import ( + "sort" + "sync" +) + +// Commitment is used to advance the leader's commit index. The leader and +// replication goroutines report in newly written entries with Match(), and +// this notifies on commitCh when the commit index has advanced. +type commitment struct { + // protects matchIndexes and commitIndex + sync.Mutex + // notified when commitIndex increases + commitCh chan struct{} + // voter ID to log index: the server stores up through this log entry + matchIndexes map[ServerID]uint64 + // a quorum stores up through this log entry. monotonically increases. + commitIndex uint64 + // the first index of this leader's term: this needs to be replicated to a + // majority of the cluster before this leader may mark anything committed + // (per Raft's commitment rule) + startIndex uint64 +} + +// newCommitment returns an commitment struct that notifies the provided +// channel when log entries have been committed. A new commitment struct is +// created each time this server becomes leader for a particular term. +// 'configuration' is the servers in the cluster. +// 'startIndex' is the first index created in this term (see +// its description above). +func newCommitment(commitCh chan struct{}, configuration Configuration, startIndex uint64) *commitment { + matchIndexes := make(map[ServerID]uint64) + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + matchIndexes[server.ID] = 0 + } + } + return &commitment{ + commitCh: commitCh, + matchIndexes: matchIndexes, + commitIndex: 0, + startIndex: startIndex, + } +} + +// Called when a new cluster membership configuration is created: it will be +// used to determine commitment from now on. 'configuration' is the servers in +// the cluster. +func (c *commitment) setConfiguration(configuration Configuration) { + c.Lock() + defer c.Unlock() + oldMatchIndexes := c.matchIndexes + c.matchIndexes = make(map[ServerID]uint64) + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + c.matchIndexes[server.ID] = oldMatchIndexes[server.ID] // defaults to 0 + } + } + c.recalculate() +} + +// Called by leader after commitCh is notified +func (c *commitment) getCommitIndex() uint64 { + c.Lock() + defer c.Unlock() + return c.commitIndex +} + +// Match is called once a server completes writing entries to disk: either the +// leader has written the new entry or a follower has replied to an +// AppendEntries RPC. The given server's disk agrees with this server's log up +// through the given index. 
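// Editor's sketch (not part of the vendored file): the commit index a leader
// may advance to is the highest index stored on a quorum of voters, i.e. the
// median of the sorted match indexes, which is exactly what the recalculate
// helper below computes. A tiny standalone illustration:
package main

import (
	"fmt"
	"sort"
)

func quorumMatchIndex(matchIndexes map[string]uint64) uint64 {
	matched := make([]uint64, 0, len(matchIndexes))
	for _, idx := range matchIndexes {
		matched = append(matched, idx)
	}
	sort.Slice(matched, func(i, j int) bool { return matched[i] < matched[j] })
	// With 5 voters, the element at index (5-1)/2 = 2 is stored by at
	// least 3 of them.
	return matched[(len(matched)-1)/2]
}

func main() {
	m := map[string]uint64{"a": 10, "b": 9, "c": 7, "d": 4, "e": 2}
	fmt.Println(quorumMatchIndex(m)) // 7: servers a, b and c all store index 7
}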
+func (c *commitment) match(server ServerID, matchIndex uint64) { + c.Lock() + defer c.Unlock() + if prev, hasVote := c.matchIndexes[server]; hasVote && matchIndex > prev { + c.matchIndexes[server] = matchIndex + c.recalculate() + } +} + +// Internal helper to calculate new commitIndex from matchIndexes. +// Must be called with lock held. +func (c *commitment) recalculate() { + if len(c.matchIndexes) == 0 { + return + } + + matched := make([]uint64, 0, len(c.matchIndexes)) + for _, idx := range c.matchIndexes { + matched = append(matched, idx) + } + sort.Sort(uint64Slice(matched)) + quorumMatchIndex := matched[(len(matched)-1)/2] + + if quorumMatchIndex > c.commitIndex && quorumMatchIndex >= c.startIndex { + c.commitIndex = quorumMatchIndex + asyncNotifyCh(c.commitCh) + } +} diff --git a/vendor/github.com/hashicorp/raft/config.go b/vendor/github.com/hashicorp/raft/config.go new file mode 100644 index 0000000..c1ce03a --- /dev/null +++ b/vendor/github.com/hashicorp/raft/config.go @@ -0,0 +1,258 @@ +package raft + +import ( + "fmt" + "io" + "log" + "time" +) + +// These are the versions of the protocol (which includes RPC messages as +// well as Raft-specific log entries) that this server can _understand_. Use +// the ProtocolVersion member of the Config object to control the version of +// the protocol to use when _speaking_ to other servers. Note that depending on +// the protocol version being spoken, some otherwise understood RPC messages +// may be refused. See dispositionRPC for details of this logic. +// +// There are notes about the upgrade path in the description of the versions +// below. If you are starting a fresh cluster then there's no reason not to +// jump right to the latest protocol version. If you need to interoperate with +// older, version 0 Raft servers you'll need to drive the cluster through the +// different versions in order. +// +// The version details are complicated, but here's a summary of what's required +// to get from a version 0 cluster to version 3: +// +// 1. In version N of your app that starts using the new Raft library with +// versioning, set ProtocolVersion to 1. +// 2. Make version N+1 of your app require version N as a prerequisite (all +// servers must be upgraded). For version N+1 of your app set ProtocolVersion +// to 2. +// 3. Similarly, make version N+2 of your app require version N+1 as a +// prerequisite. For version N+2 of your app, set ProtocolVersion to 3. +// +// During this upgrade, older cluster members will still have Server IDs equal +// to their network addresses. To upgrade an older member and give it an ID, it +// needs to leave the cluster and re-enter: +// +// 1. Remove the server from the cluster with RemoveServer, using its network +// address as its ServerID. +// 2. Update the server's config to a better ID (restarting the server). +// 3. Add the server back to the cluster with AddVoter, using its new ID. +// +// You can do this during the rolling upgrade from N+1 to N+2 of your app, or +// as a rolling change at any time after the upgrade. +// +// Version History +// +// 0: Original Raft library before versioning was added. Servers running this +// version of the Raft library use AddPeerDeprecated/RemovePeerDeprecated +// for all configuration changes, and have no support for LogConfiguration. +// 1: First versioned protocol, used to interoperate with old servers, and begin +// the migration path to newer versions of the protocol. 
Under this version +// all configuration changes are propagated using the now-deprecated +// RemovePeerDeprecated Raft log entry. This means that server IDs are always +// set to be the same as the server addresses (since the old log entry type +// cannot transmit an ID), and only AddPeer/RemovePeer APIs are supported. +// Servers running this version of the protocol can understand the new +// LogConfiguration Raft log entry but will never generate one so they can +// remain compatible with version 0 Raft servers in the cluster. +// 2: Transitional protocol used when migrating an existing cluster to the new +// server ID system. Server IDs are still set to be the same as server +// addresses, but all configuration changes are propagated using the new +// LogConfiguration Raft log entry type, which can carry full ID information. +// This version supports the old AddPeer/RemovePeer APIs as well as the new +// ID-based AddVoter/RemoveServer APIs which should be used when adding +// version 3 servers to the cluster later. This version sheds all +// interoperability with version 0 servers, but can interoperate with newer +// Raft servers running with protocol version 1 since they can understand the +// new LogConfiguration Raft log entry, and this version can still understand +// their RemovePeerDeprecated Raft log entries. We need this protocol version +// as an intermediate step between 1 and 3 so that servers will propagate the +// ID information that will come from newly-added (or -rolled) servers using +// protocol version 3, but since they are still using their address-based IDs +// from the previous step they will still be able to track commitments and +// their own voting status properly. If we skipped this step, servers would +// be started with their new IDs, but they wouldn't see themselves in the old +// address-based configuration, so none of the servers would think they had a +// vote. +// 3: Protocol adding full support for server IDs and new ID-based server APIs +// (AddVoter, AddNonvoter, etc.), old AddPeer/RemovePeer APIs are no longer +// supported. Version 2 servers should be swapped out by removing them from +// the cluster one-by-one and re-adding them with updated configuration for +// this protocol version, along with their server ID. The remove/add cycle +// is required to populate their server ID. Note that removing must be done +// by ID, which will be the old server's address. +type ProtocolVersion int + +const ( + ProtocolVersionMin ProtocolVersion = 0 + ProtocolVersionMax = 3 +) + +// These are versions of snapshots that this server can _understand_. Currently, +// it is always assumed that this server generates the latest version, though +// this may be changed in the future to include a configurable version. +// +// Version History +// +// 0: Original Raft library before versioning was added. The peers portion of +// these snapshots is encoded in the legacy format which requires decodePeers +// to parse. This version of snapshots should only be produced by the +// unversioned Raft library. +// 1: New format which adds support for a full configuration structure and its +// associated log index, with support for server IDs and non-voting server +// modes. To ease upgrades, this also includes the legacy peers structure but +// that will never be used by servers that understand version 1 snapshots. 
+// Since the original Raft library didn't enforce any versioning, we must +// include the legacy peers structure for this version, but we can deprecate +// it in the next snapshot version. +type SnapshotVersion int + +const ( + SnapshotVersionMin SnapshotVersion = 0 + SnapshotVersionMax = 1 +) + +// Config provides any necessary configuration for the Raft server. +type Config struct { + // ProtocolVersion allows a Raft server to inter-operate with older + // Raft servers running an older version of the code. This is used to + // version the wire protocol as well as Raft-specific log entries that + // the server uses when _speaking_ to other servers. There is currently + // no auto-negotiation of versions so all servers must be manually + // configured with compatible versions. See ProtocolVersionMin and + // ProtocolVersionMax for the versions of the protocol that this server + // can _understand_. + ProtocolVersion ProtocolVersion + + // HeartbeatTimeout specifies the time in follower state without + // a leader before we attempt an election. + HeartbeatTimeout time.Duration + + // ElectionTimeout specifies the time in candidate state without + // a leader before we attempt an election. + ElectionTimeout time.Duration + + // CommitTimeout controls the time without an Apply() operation + // before we heartbeat to ensure a timely commit. Due to random + // staggering, may be delayed as much as 2x this value. + CommitTimeout time.Duration + + // MaxAppendEntries controls the maximum number of append entries + // to send at once. We want to strike a balance between efficiency + // and avoiding waste if the follower is going to reject because of + // an inconsistent log. + MaxAppendEntries int + + // If we are a member of a cluster, and RemovePeer is invoked for the + // local node, then we forget all peers and transition into the follower state. + // If ShutdownOnRemove is is set, we additional shutdown Raft. Otherwise, + // we can become a leader of a cluster containing only this node. + ShutdownOnRemove bool + + // TrailingLogs controls how many logs we leave after a snapshot. This is + // used so that we can quickly replay logs on a follower instead of being + // forced to send an entire snapshot. + TrailingLogs uint64 + + // SnapshotInterval controls how often we check if we should perform a snapshot. + // We randomly stagger between this value and 2x this value to avoid the entire + // cluster from performing a snapshot at once. + SnapshotInterval time.Duration + + // SnapshotThreshold controls how many outstanding logs there must be before + // we perform a snapshot. This is to prevent excessive snapshots when we can + // just replay a small set of logs. + SnapshotThreshold uint64 + + // LeaderLeaseTimeout is used to control how long the "lease" lasts + // for being the leader without being able to contact a quorum + // of nodes. If we reach this interval without contact, we will + // step down as leader. + LeaderLeaseTimeout time.Duration + + // StartAsLeader forces Raft to start in the leader state. This should + // never be used except for testing purposes, as it can cause a split-brain. + StartAsLeader bool + + // The unique ID for this server across all time. When running with + // ProtocolVersion < 3, you must set this to be the same as the network + // address of your transport. + LocalID ServerID + + // NotifyCh is used to provide a channel that will be notified of leadership + // changes. 
Raft will block writing to this channel, so it should either be + // buffered or aggressively consumed. + NotifyCh chan<- bool + + // LogOutput is used as a sink for logs, unless Logger is specified. + // Defaults to os.Stderr. + LogOutput io.Writer + + // Logger is a user-provided logger. If nil, a logger writing to LogOutput + // is used. + Logger *log.Logger +} + +// DefaultConfig returns a Config with usable defaults. +func DefaultConfig() *Config { + return &Config{ + ProtocolVersion: ProtocolVersionMax, + HeartbeatTimeout: 1000 * time.Millisecond, + ElectionTimeout: 1000 * time.Millisecond, + CommitTimeout: 50 * time.Millisecond, + MaxAppendEntries: 64, + ShutdownOnRemove: true, + TrailingLogs: 10240, + SnapshotInterval: 120 * time.Second, + SnapshotThreshold: 8192, + LeaderLeaseTimeout: 500 * time.Millisecond, + } +} + +// ValidateConfig is used to validate a sane configuration +func ValidateConfig(config *Config) error { + // We don't actually support running as 0 in the library any more, but + // we do understand it. + protocolMin := ProtocolVersionMin + if protocolMin == 0 { + protocolMin = 1 + } + if config.ProtocolVersion < protocolMin || + config.ProtocolVersion > ProtocolVersionMax { + return fmt.Errorf("Protocol version %d must be >= %d and <= %d", + config.ProtocolVersion, protocolMin, ProtocolVersionMax) + } + if len(config.LocalID) == 0 { + return fmt.Errorf("LocalID cannot be empty") + } + if config.HeartbeatTimeout < 5*time.Millisecond { + return fmt.Errorf("Heartbeat timeout is too low") + } + if config.ElectionTimeout < 5*time.Millisecond { + return fmt.Errorf("Election timeout is too low") + } + if config.CommitTimeout < time.Millisecond { + return fmt.Errorf("Commit timeout is too low") + } + if config.MaxAppendEntries <= 0 { + return fmt.Errorf("MaxAppendEntries must be positive") + } + if config.MaxAppendEntries > 1024 { + return fmt.Errorf("MaxAppendEntries is too large") + } + if config.SnapshotInterval < 5*time.Millisecond { + return fmt.Errorf("Snapshot interval is too low") + } + if config.LeaderLeaseTimeout < 5*time.Millisecond { + return fmt.Errorf("Leader lease timeout is too low") + } + if config.LeaderLeaseTimeout > config.HeartbeatTimeout { + return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout") + } + if config.ElectionTimeout < config.HeartbeatTimeout { + return fmt.Errorf("Election timeout must be equal or greater than Heartbeat Timeout") + } + return nil +} diff --git a/vendor/github.com/hashicorp/raft/configuration.go b/vendor/github.com/hashicorp/raft/configuration.go new file mode 100644 index 0000000..4bb784d --- /dev/null +++ b/vendor/github.com/hashicorp/raft/configuration.go @@ -0,0 +1,343 @@ +package raft + +import "fmt" + +// ServerSuffrage determines whether a Server in a Configuration gets a vote. +type ServerSuffrage int + +// Note: Don't renumber these, since the numbers are written into the log. +const ( + // Voter is a server whose vote is counted in elections and whose match index + // is used in advancing the leader's commit index. + Voter ServerSuffrage = iota + // Nonvoter is a server that receives log entries but is not considered for + // elections or commitment purposes. + Nonvoter + // Staging is a server that acts like a nonvoter with one exception: once a + // staging server receives enough log entries to be sufficiently caught up to + // the leader's log, the leader will invoke a membership change to change + // the Staging server to a Voter. 
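// Editor's sketch (not part of the vendored file): adjusting DefaultConfig for
// a higher-latency environment and checking it with ValidateConfig, which
// enforces LeaderLeaseTimeout <= HeartbeatTimeout <= ElectionTimeout among
// other bounds. The concrete values are illustrative only.
package raftsketch

import (
	"time"

	"github.com/hashicorp/raft"
)

func tunedConfig(id raft.ServerID) (*raft.Config, error) {
	conf := raft.DefaultConfig()
	conf.LocalID = id
	conf.HeartbeatTimeout = 2 * time.Second
	conf.ElectionTimeout = 2 * time.Second
	conf.LeaderLeaseTimeout = 1 * time.Second
	conf.SnapshotThreshold = 16384 // snapshot less often
	conf.TrailingLogs = 20480      // keep more logs so slow followers avoid full snapshots
	if err := raft.ValidateConfig(conf); err != nil {
		return nil, err
	}
	return conf, nil
}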
+ Staging +) + +func (s ServerSuffrage) String() string { + switch s { + case Voter: + return "Voter" + case Nonvoter: + return "Nonvoter" + case Staging: + return "Staging" + } + return "ServerSuffrage" +} + +// ServerID is a unique string identifying a server for all time. +type ServerID string + +// ServerAddress is a network address for a server that a transport can contact. +type ServerAddress string + +// Server tracks the information about a single server in a configuration. +type Server struct { + // Suffrage determines whether the server gets a vote. + Suffrage ServerSuffrage + // ID is a unique string identifying this server for all time. + ID ServerID + // Address is its network address that a transport can contact. + Address ServerAddress +} + +// Configuration tracks which servers are in the cluster, and whether they have +// votes. This should include the local server, if it's a member of the cluster. +// The servers are listed no particular order, but each should only appear once. +// These entries are appended to the log during membership changes. +type Configuration struct { + Servers []Server +} + +// Clone makes a deep copy of a Configuration. +func (c *Configuration) Clone() (copy Configuration) { + copy.Servers = append(copy.Servers, c.Servers...) + return +} + +// ConfigurationChangeCommand is the different ways to change the cluster +// configuration. +type ConfigurationChangeCommand uint8 + +const ( + // AddStaging makes a server Staging unless its Voter. + AddStaging ConfigurationChangeCommand = iota + // AddNonvoter makes a server Nonvoter unless its Staging or Voter. + AddNonvoter + // DemoteVoter makes a server Nonvoter unless its absent. + DemoteVoter + // RemoveServer removes a server entirely from the cluster membership. + RemoveServer + // Promote is created automatically by a leader; it turns a Staging server + // into a Voter. + Promote +) + +func (c ConfigurationChangeCommand) String() string { + switch c { + case AddStaging: + return "AddStaging" + case AddNonvoter: + return "AddNonvoter" + case DemoteVoter: + return "DemoteVoter" + case RemoveServer: + return "RemoveServer" + case Promote: + return "Promote" + } + return "ConfigurationChangeCommand" +} + +// configurationChangeRequest describes a change that a leader would like to +// make to its current configuration. It's used only within a single server +// (never serialized into the log), as part of `configurationChangeFuture`. +type configurationChangeRequest struct { + command ConfigurationChangeCommand + serverID ServerID + serverAddress ServerAddress // only present for AddStaging, AddNonvoter + // prevIndex, if nonzero, is the index of the only configuration upon which + // this change may be applied; if another configuration entry has been + // added in the meantime, this request will fail. + prevIndex uint64 +} + +// configurations is state tracked on every server about its Configurations. +// Note that, per Diego's dissertation, there can be at most one uncommitted +// configuration at a time (the next configuration may not be created until the +// prior one has been committed). +// +// One downside to storing just two configurations is that if you try to take a +// snapshot when your state machine hasn't yet applied the committedIndex, we +// have no record of the configuration that would logically fit into that +// snapshot. We disallow snapshots in that case now. An alternative approach, +// which LogCabin uses, is to track every configuration change in the +// log. 
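// Editor's sketch (not part of the vendored file): deriving the quorum size
// from the latest configuration. Only servers with Voter suffrage count
// toward elections and commitment, as the suffrage comments above describe.
package raftsketch

import "github.com/hashicorp/raft"

func quorumSize(r *raft.Raft) (int, error) {
	future := r.GetConfiguration()
	if err := future.Error(); err != nil {
		return 0, err
	}
	voters := 0
	for _, server := range future.Configuration().Servers {
		if server.Suffrage == raft.Voter {
			voters++
		}
	}
	return voters/2 + 1, nil
}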
+type configurations struct { + // committed is the latest configuration in the log/snapshot that has been + // committed (the one with the largest index). + committed Configuration + // committedIndex is the log index where 'committed' was written. + committedIndex uint64 + // latest is the latest configuration in the log/snapshot (may be committed + // or uncommitted) + latest Configuration + // latestIndex is the log index where 'latest' was written. + latestIndex uint64 +} + +// Clone makes a deep copy of a configurations object. +func (c *configurations) Clone() (copy configurations) { + copy.committed = c.committed.Clone() + copy.committedIndex = c.committedIndex + copy.latest = c.latest.Clone() + copy.latestIndex = c.latestIndex + return +} + +// hasVote returns true if the server identified by 'id' is a Voter in the +// provided Configuration. +func hasVote(configuration Configuration, id ServerID) bool { + for _, server := range configuration.Servers { + if server.ID == id { + return server.Suffrage == Voter + } + } + return false +} + +// checkConfiguration tests a cluster membership configuration for common +// errors. +func checkConfiguration(configuration Configuration) error { + idSet := make(map[ServerID]bool) + addressSet := make(map[ServerAddress]bool) + var voters int + for _, server := range configuration.Servers { + if server.ID == "" { + return fmt.Errorf("Empty ID in configuration: %v", configuration) + } + if server.Address == "" { + return fmt.Errorf("Empty address in configuration: %v", server) + } + if idSet[server.ID] { + return fmt.Errorf("Found duplicate ID in configuration: %v", server.ID) + } + idSet[server.ID] = true + if addressSet[server.Address] { + return fmt.Errorf("Found duplicate address in configuration: %v", server.Address) + } + addressSet[server.Address] = true + if server.Suffrage == Voter { + voters++ + } + } + if voters == 0 { + return fmt.Errorf("Need at least one voter in configuration: %v", configuration) + } + return nil +} + +// nextConfiguration generates a new Configuration from the current one and a +// configuration change request. It's split from appendConfigurationEntry so +// that it can be unit tested easily. +func nextConfiguration(current Configuration, currentIndex uint64, change configurationChangeRequest) (Configuration, error) { + if change.prevIndex > 0 && change.prevIndex != currentIndex { + return Configuration{}, fmt.Errorf("Configuration changed since %v (latest is %v)", change.prevIndex, currentIndex) + } + + configuration := current.Clone() + switch change.command { + case AddStaging: + // TODO: barf on new address? + newServer := Server{ + // TODO: This should add the server as Staging, to be automatically + // promoted to Voter later. However, the promotion to Voter is not yet + // implemented, and doing so is not trivial with the way the leader loop + // coordinates with the replication goroutines today. So, for now, the + // server will have a vote right away, and the Promote case below is + // unused. 
+ Suffrage: Voter, + ID: change.serverID, + Address: change.serverAddress, + } + found := false + for i, server := range configuration.Servers { + if server.ID == change.serverID { + if server.Suffrage == Voter { + configuration.Servers[i].Address = change.serverAddress + } else { + configuration.Servers[i] = newServer + } + found = true + break + } + } + if !found { + configuration.Servers = append(configuration.Servers, newServer) + } + case AddNonvoter: + newServer := Server{ + Suffrage: Nonvoter, + ID: change.serverID, + Address: change.serverAddress, + } + found := false + for i, server := range configuration.Servers { + if server.ID == change.serverID { + if server.Suffrage != Nonvoter { + configuration.Servers[i].Address = change.serverAddress + } else { + configuration.Servers[i] = newServer + } + found = true + break + } + } + if !found { + configuration.Servers = append(configuration.Servers, newServer) + } + case DemoteVoter: + for i, server := range configuration.Servers { + if server.ID == change.serverID { + configuration.Servers[i].Suffrage = Nonvoter + break + } + } + case RemoveServer: + for i, server := range configuration.Servers { + if server.ID == change.serverID { + configuration.Servers = append(configuration.Servers[:i], configuration.Servers[i+1:]...) + break + } + } + case Promote: + for i, server := range configuration.Servers { + if server.ID == change.serverID && server.Suffrage == Staging { + configuration.Servers[i].Suffrage = Voter + break + } + } + } + + // Make sure we didn't do something bad like remove the last voter + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + + return configuration, nil +} + +// encodePeers is used to serialize a Configuration into the old peers format. +// This is here for backwards compatibility when operating with a mix of old +// servers and should be removed once we deprecate support for protocol version 1. +func encodePeers(configuration Configuration, trans Transport) []byte { + // Gather up all the voters, other suffrage types are not supported by + // this data format. + var encPeers [][]byte + for _, server := range configuration.Servers { + if server.Suffrage == Voter { + encPeers = append(encPeers, trans.EncodePeer(server.ID, server.Address)) + } + } + + // Encode the entire array. + buf, err := encodeMsgPack(encPeers) + if err != nil { + panic(fmt.Errorf("failed to encode peers: %v", err)) + } + + return buf.Bytes() +} + +// decodePeers is used to deserialize an old list of peers into a Configuration. +// This is here for backwards compatibility with old log entries and snapshots; +// it should be removed eventually. +func decodePeers(buf []byte, trans Transport) Configuration { + // Decode the buffer first. + var encPeers [][]byte + if err := decodeMsgPack(buf, &encPeers); err != nil { + panic(fmt.Errorf("failed to decode peers: %v", err)) + } + + // Deserialize each peer. + var servers []Server + for _, enc := range encPeers { + p := trans.DecodePeer(enc) + servers = append(servers, Server{ + Suffrage: Voter, + ID: ServerID(p), + Address: ServerAddress(p), + }) + } + + return Configuration{ + Servers: servers, + } +} + +// encodeConfiguration serializes a Configuration using MsgPack, or panics on +// errors. 
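
Editorial aside: since `nextConfiguration` and `configurationChangeRequest` are unexported, they can only be exercised from inside the package. The following is a hypothetical test-style sketch (it would live in a `_test.go` file of this package; the IDs and the test name are made up, and the library ships its own tests not shown in this diff) showing a demotion being applied and the last-voter guard rejecting a removal.

```go
package raft

import "testing"

func TestNextConfigurationSketch(t *testing.T) {
	current := Configuration{
		Servers: []Server{
			{Suffrage: Voter, ID: "id1", Address: "addr1"},
			{Suffrage: Voter, ID: "id2", Address: "addr2"},
		},
	}

	// Demote id2: it stays in the configuration but loses its vote.
	next, err := nextConfiguration(current, 1, configurationChangeRequest{
		command:  DemoteVoter,
		serverID: "id2",
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if hasVote(next, "id2") {
		t.Fatalf("id2 should no longer be a voter")
	}

	// Removing the only remaining voter trips checkConfiguration.
	if _, err := nextConfiguration(next, 2, configurationChangeRequest{
		command:  RemoveServer,
		serverID: "id1",
	}); err == nil {
		t.Fatalf("expected an error when removing the last voter")
	}
}
```
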
+func encodeConfiguration(configuration Configuration) []byte { + buf, err := encodeMsgPack(configuration) + if err != nil { + panic(fmt.Errorf("failed to encode configuration: %v", err)) + } + return buf.Bytes() +} + +// decodeConfiguration deserializes a Configuration using MsgPack, or panics on +// errors. +func decodeConfiguration(buf []byte) Configuration { + var configuration Configuration + if err := decodeMsgPack(buf, &configuration); err != nil { + panic(fmt.Errorf("failed to decode configuration: %v", err)) + } + return configuration +} diff --git a/vendor/github.com/hashicorp/raft/discard_snapshot.go b/vendor/github.com/hashicorp/raft/discard_snapshot.go new file mode 100644 index 0000000..5e93a9f --- /dev/null +++ b/vendor/github.com/hashicorp/raft/discard_snapshot.go @@ -0,0 +1,49 @@ +package raft + +import ( + "fmt" + "io" +) + +// DiscardSnapshotStore is used to successfully snapshot while +// always discarding the snapshot. This is useful for when the +// log should be truncated but no snapshot should be retained. +// This should never be used for production use, and is only +// suitable for testing. +type DiscardSnapshotStore struct{} + +type DiscardSnapshotSink struct{} + +// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore. +func NewDiscardSnapshotStore() *DiscardSnapshotStore { + return &DiscardSnapshotStore{} +} + +func (d *DiscardSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + return &DiscardSnapshotSink{}, nil +} + +func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) { + return nil, nil +} + +func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + return nil, nil, fmt.Errorf("open is not supported") +} + +func (d *DiscardSnapshotSink) Write(b []byte) (int, error) { + return len(b), nil +} + +func (d *DiscardSnapshotSink) Close() error { + return nil +} + +func (d *DiscardSnapshotSink) ID() string { + return "discard" +} + +func (d *DiscardSnapshotSink) Cancel() error { + return nil +} diff --git a/vendor/github.com/hashicorp/raft/file_snapshot.go b/vendor/github.com/hashicorp/raft/file_snapshot.go new file mode 100644 index 0000000..ffc9414 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/file_snapshot.go @@ -0,0 +1,528 @@ +package raft + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "time" +) + +const ( + testPath = "permTest" + snapPath = "snapshots" + metaFilePath = "meta.json" + stateFilePath = "state.bin" + tmpSuffix = ".tmp" +) + +// FileSnapshotStore implements the SnapshotStore interface and allows +// snapshots to be made on the local disk. +type FileSnapshotStore struct { + path string + retain int + logger *log.Logger +} + +type snapMetaSlice []*fileSnapshotMeta + +// FileSnapshotSink implements SnapshotSink with a file. +type FileSnapshotSink struct { + store *FileSnapshotStore + logger *log.Logger + dir string + parentDir string + meta fileSnapshotMeta + + stateFile *os.File + stateHash hash.Hash64 + buffered *bufio.Writer + + closed bool +} + +// fileSnapshotMeta is stored on disk. We also put a CRC +// on disk so that we can verify the snapshot. +type fileSnapshotMeta struct { + SnapshotMeta + CRC []byte +} + +// bufferedFile is returned when we open a snapshot. This way +// reads are buffered and the file still gets closed. 
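
Editorial aside: as the comment above says, `DiscardSnapshotStore` is test-only. A minimal sketch of driving it directly (the version, index, and term values are arbitrary; the nil transport is fine here only because the discard implementation ignores every argument):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func main() {
	store := raft.NewDiscardSnapshotStore()

	// Every argument is ignored; the sink simply swallows whatever is written.
	sink, err := store.Create(1, 10, 2, raft.Configuration{}, 1, nil)
	if err != nil {
		panic(err)
	}
	if _, err := sink.Write([]byte("state that will be thrown away")); err != nil {
		panic(err)
	}
	if err := sink.Close(); err != nil {
		panic(err)
	}

	// Nothing is retained: List always reports no snapshots.
	metas, _ := store.List()
	fmt.Println(len(metas)) // 0
}
```
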
+type bufferedFile struct { + bh *bufio.Reader + fh *os.File +} + +func (b *bufferedFile) Read(p []byte) (n int, err error) { + return b.bh.Read(p) +} + +func (b *bufferedFile) Close() error { + return b.fh.Close() +} + +// NewFileSnapshotStoreWithLogger creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. +func NewFileSnapshotStoreWithLogger(base string, retain int, logger *log.Logger) (*FileSnapshotStore, error) { + if retain < 1 { + return nil, fmt.Errorf("must retain at least one snapshot") + } + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + + // Ensure our path exists + path := filepath.Join(base, snapPath) + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("snapshot path not accessible: %v", err) + } + + // Setup the store + store := &FileSnapshotStore{ + path: path, + retain: retain, + logger: logger, + } + + // Do a permissions test + if err := store.testPermissions(); err != nil { + return nil, fmt.Errorf("permissions test failed: %v", err) + } + return store, nil +} + +// NewFileSnapshotStore creates a new FileSnapshotStore based +// on a base directory. The `retain` parameter controls how many +// snapshots are retained. Must be at least 1. +func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { + if logOutput == nil { + logOutput = os.Stderr + } + return NewFileSnapshotStoreWithLogger(base, retain, log.New(logOutput, "", log.LstdFlags)) +} + +// testPermissions tries to touch a file in our path to see if it works. +func (f *FileSnapshotStore) testPermissions() error { + path := filepath.Join(f.path, testPath) + fh, err := os.Create(path) + if err != nil { + return err + } + + if err = fh.Close(); err != nil { + return err + } + + if err = os.Remove(path); err != nil { + return err + } + return nil +} + +// snapshotName generates a name for the snapshot. +func snapshotName(term, index uint64) string { + now := time.Now() + msec := now.UnixNano() / int64(time.Millisecond) + return fmt.Sprintf("%d-%d-%d", term, index, msec) +} + +// Create is used to start a new snapshot +func (f *FileSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + // We only support version 1 snapshots at this time. 
+ if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + // Create a new path + name := snapshotName(term, index) + path := filepath.Join(f.path, name+tmpSuffix) + f.logger.Printf("[INFO] snapshot: Creating new snapshot at %s", path) + + // Make the directory + if err := os.MkdirAll(path, 0755); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to make snapshot directory: %v", err) + return nil, err + } + + // Create the sink + sink := &FileSnapshotSink{ + store: f, + logger: f.logger, + dir: path, + parentDir: f.path, + meta: fileSnapshotMeta{ + SnapshotMeta: SnapshotMeta{ + Version: version, + ID: name, + Index: index, + Term: term, + Peers: encodePeers(configuration, trans), + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + CRC: nil, + }, + } + + // Write out the meta data + if err := sink.writeMeta(); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) + return nil, err + } + + // Open the state file + statePath := filepath.Join(path, stateFilePath) + fh, err := os.Create(statePath) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to create state file: %v", err) + return nil, err + } + sink.stateFile = fh + + // Create a CRC64 hash + sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Wrap both the hash and file in a MultiWriter with buffering + multi := io.MultiWriter(sink.stateFile, sink.stateHash) + sink.buffered = bufio.NewWriter(multi) + + // Done + return sink, nil +} + +// List returns available snapshots in the store. +func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) + return nil, err + } + + var snapMeta []*SnapshotMeta + for _, meta := range snapshots { + snapMeta = append(snapMeta, &meta.SnapshotMeta) + if len(snapMeta) == f.retain { + break + } + } + return snapMeta, nil +} + +// getSnapshots returns all the known snapshots. +func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { + // Get the eligible snapshots + snapshots, err := ioutil.ReadDir(f.path) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to scan snapshot dir: %v", err) + return nil, err + } + + // Populate the metadata + var snapMeta []*fileSnapshotMeta + for _, snap := range snapshots { + // Ignore any files + if !snap.IsDir() { + continue + } + + // Ignore any temporary snapshots + dirName := snap.Name() + if strings.HasSuffix(dirName, tmpSuffix) { + f.logger.Printf("[WARN] snapshot: Found temporary snapshot: %v", dirName) + continue + } + + // Try to read the meta data + meta, err := f.readMeta(dirName) + if err != nil { + f.logger.Printf("[WARN] snapshot: Failed to read metadata for %v: %v", dirName, err) + continue + } + + // Make sure we can understand this version. 
+ if meta.Version < SnapshotVersionMin || meta.Version > SnapshotVersionMax { + f.logger.Printf("[WARN] snapshot: Snapshot version for %v not supported: %d", dirName, meta.Version) + continue + } + + // Append, but only return up to the retain count + snapMeta = append(snapMeta, meta) + } + + // Sort the snapshot, reverse so we get new -> old + sort.Sort(sort.Reverse(snapMetaSlice(snapMeta))) + + return snapMeta, nil +} + +// readMeta is used to read the meta data for a given named backup +func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { + // Open the meta file + metaPath := filepath.Join(f.path, name, metaFilePath) + fh, err := os.Open(metaPath) + if err != nil { + return nil, err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewReader(fh) + + // Read in the JSON + meta := &fileSnapshotMeta{} + dec := json.NewDecoder(buffered) + if err := dec.Decode(meta); err != nil { + return nil, err + } + return meta, nil +} + +// Open takes a snapshot ID and returns a ReadCloser for that snapshot. +func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + // Get the metadata + meta, err := f.readMeta(id) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get meta data to open snapshot: %v", err) + return nil, nil, err + } + + // Open the state file + statePath := filepath.Join(f.path, id, stateFilePath) + fh, err := os.Open(statePath) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to open state file: %v", err) + return nil, nil, err + } + + // Create a CRC64 hash + stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) + + // Compute the hash + _, err = io.Copy(stateHash, fh) + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to read state file: %v", err) + fh.Close() + return nil, nil, err + } + + // Verify the hash + computed := stateHash.Sum(nil) + if bytes.Compare(meta.CRC, computed) != 0 { + f.logger.Printf("[ERR] snapshot: CRC checksum failed (stored: %v computed: %v)", + meta.CRC, computed) + fh.Close() + return nil, nil, fmt.Errorf("CRC mismatch") + } + + // Seek to the start + if _, err := fh.Seek(0, 0); err != nil { + f.logger.Printf("[ERR] snapshot: State file seek failed: %v", err) + fh.Close() + return nil, nil, err + } + + // Return a buffered file + buffered := &bufferedFile{ + bh: bufio.NewReader(fh), + fh: fh, + } + + return &meta.SnapshotMeta, buffered, nil +} + +// ReapSnapshots reaps any snapshots beyond the retain count. +func (f *FileSnapshotStore) ReapSnapshots() error { + snapshots, err := f.getSnapshots() + if err != nil { + f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) + return err + } + + for i := f.retain; i < len(snapshots); i++ { + path := filepath.Join(f.path, snapshots[i].ID) + f.logger.Printf("[INFO] snapshot: reaping snapshot %v", path) + if err := os.RemoveAll(path); err != nil { + f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err) + return err + } + } + return nil +} + +// ID returns the ID of the snapshot, can be used with Open() +// after the snapshot is finalized. +func (s *FileSnapshotSink) ID() string { + return s.meta.ID +} + +// Write is used to append to the state file. We write to the +// buffered IO object to reduce the amount of context switches. +func (s *FileSnapshotSink) Write(b []byte) (int, error) { + return s.buffered.Write(b) +} + +// Close is used to indicate a successful end. 
+func (s *FileSnapshotSink) Close() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) + if delErr := os.RemoveAll(s.dir); delErr != nil { + s.logger.Printf("[ERR] snapshot: Failed to delete temporary snapshot directory at path %v: %v", s.dir, delErr) + return delErr + } + return err + } + + // Write out the meta data + if err := s.writeMeta(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) + return err + } + + // Move the directory into place + newPath := strings.TrimSuffix(s.dir, tmpSuffix) + if err := os.Rename(s.dir, newPath); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to move snapshot into place: %v", err) + return err + } + + if runtime.GOOS != "windows" { //skipping fsync for directory entry edits on Windows, only needed for *nix style file systems + parentFH, err := os.Open(s.parentDir) + defer parentFH.Close() + if err != nil { + s.logger.Printf("[ERR] snapshot: Failed to open snapshot parent directory %v, error: %v", s.parentDir, err) + return err + } + + if err = parentFH.Sync(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed syncing parent directory %v, error: %v", s.parentDir, err) + return err + } + } + + // Reap any old snapshots + if err := s.store.ReapSnapshots(); err != nil { + return err + } + + return nil +} + +// Cancel is used to indicate an unsuccessful end. +func (s *FileSnapshotSink) Cancel() error { + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + // Close the open handles + if err := s.finalize(); err != nil { + s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) + return err + } + + // Attempt to remove all artifacts + return os.RemoveAll(s.dir) +} + +// finalize is used to close all of our resources. +func (s *FileSnapshotSink) finalize() error { + // Flush any remaining data + if err := s.buffered.Flush(); err != nil { + return err + } + + // Sync to force fsync to disk + if err := s.stateFile.Sync(); err != nil { + return err + } + + // Get the file size + stat, statErr := s.stateFile.Stat() + + // Close the file + if err := s.stateFile.Close(); err != nil { + return err + } + + // Set the file size, check after we close + if statErr != nil { + return statErr + } + s.meta.Size = stat.Size() + + // Set the CRC + s.meta.CRC = s.stateHash.Sum(nil) + return nil +} + +// writeMeta is used to write out the metadata we have. +func (s *FileSnapshotSink) writeMeta() error { + // Open the meta file + metaPath := filepath.Join(s.dir, metaFilePath) + fh, err := os.Create(metaPath) + if err != nil { + return err + } + defer fh.Close() + + // Buffer the file IO + buffered := bufio.NewWriter(fh) + + // Write out as JSON + enc := json.NewEncoder(buffered) + if err := enc.Encode(&s.meta); err != nil { + return err + } + + if err = buffered.Flush(); err != nil { + return err + } + + if err = fh.Sync(); err != nil { + return err + } + + return nil +} + +// Implement the sort interface for []*fileSnapshotMeta. 
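
Editorial aside: a sketch of the full `FileSnapshotStore` lifecycle described above, create a sink, stream state into it, finalize with `Close`, then list and reopen (which re-verifies the CRC). The directory, configuration, and state bytes are invented; an `InmemTransport` (defined later in this diff) stands in for a real transport, which is only needed to encode the legacy peers format.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/hashicorp/raft"
)

func main() {
	dir, err := ioutil.TempDir("", "snapshots")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Keep at most two snapshots on disk; log to stderr.
	store, err := raft.NewFileSnapshotStore(dir, 2, os.Stderr)
	if err != nil {
		panic(err)
	}

	_, trans := raft.NewInmemTransport("")
	configuration := raft.Configuration{Servers: []raft.Server{
		{Suffrage: raft.Voter, ID: "node-1", Address: "10.0.0.1:8300"},
	}}

	// Create a sink, stream the FSM state into it, then finalize with Close.
	sink, err := store.Create(1, 5, 2, configuration, 1, trans)
	if err != nil {
		panic(err)
	}
	if _, err := sink.Write([]byte("fsm state bytes")); err != nil {
		sink.Cancel()
		panic(err)
	}
	if err := sink.Close(); err != nil {
		panic(err)
	}

	// The finalized snapshot is now visible and can be reopened and verified.
	metas, err := store.List()
	if err != nil || len(metas) == 0 {
		panic("expected a snapshot to be listed")
	}
	meta, rc, err := store.Open(metas[0].ID)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	state, _ := ioutil.ReadAll(rc)
	fmt.Println(meta.Index, meta.Term, string(state)) // 5 2 fsm state bytes
}
```
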
+func (s snapMetaSlice) Len() int { + return len(s) +} + +func (s snapMetaSlice) Less(i, j int) bool { + if s[i].Term != s[j].Term { + return s[i].Term < s[j].Term + } + if s[i].Index != s[j].Index { + return s[i].Index < s[j].Index + } + return s[i].ID < s[j].ID +} + +func (s snapMetaSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/hashicorp/raft/fsm.go b/vendor/github.com/hashicorp/raft/fsm.go new file mode 100644 index 0000000..c89986c --- /dev/null +++ b/vendor/github.com/hashicorp/raft/fsm.go @@ -0,0 +1,136 @@ +package raft + +import ( + "fmt" + "io" + "time" + + "github.com/armon/go-metrics" +) + +// FSM provides an interface that can be implemented by +// clients to make use of the replicated log. +type FSM interface { + // Apply log is invoked once a log entry is committed. + // It returns a value which will be made available in the + // ApplyFuture returned by Raft.Apply method if that + // method was called on the same Raft node as the FSM. + Apply(*Log) interface{} + + // Snapshot is used to support log compaction. This call should + // return an FSMSnapshot which can be used to save a point-in-time + // snapshot of the FSM. Apply and Snapshot are not called in multiple + // threads, but Apply will be called concurrently with Persist. This means + // the FSM should be implemented in a fashion that allows for concurrent + // updates while a snapshot is happening. + Snapshot() (FSMSnapshot, error) + + // Restore is used to restore an FSM from a snapshot. It is not called + // concurrently with any other command. The FSM must discard all previous + // state. + Restore(io.ReadCloser) error +} + +// FSMSnapshot is returned by an FSM in response to a Snapshot +// It must be safe to invoke FSMSnapshot methods with concurrent +// calls to Apply. +type FSMSnapshot interface { + // Persist should dump all necessary state to the WriteCloser 'sink', + // and call sink.Close() when finished or call sink.Cancel() on error. + Persist(sink SnapshotSink) error + + // Release is invoked when we are finished with the snapshot. + Release() +} + +// runFSM is a long running goroutine responsible for applying logs +// to the FSM. This is done async of other logs since we don't want +// the FSM to block our internal operations. +func (r *Raft) runFSM() { + var lastIndex, lastTerm uint64 + + commit := func(req *commitTuple) { + // Apply the log if a command + var resp interface{} + if req.log.Type == LogCommand { + start := time.Now() + resp = r.fsm.Apply(req.log) + metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start) + } + + // Update the indexes + lastIndex = req.log.Index + lastTerm = req.log.Term + + // Invoke the future if given + if req.future != nil { + req.future.response = resp + req.future.respond(nil) + } + } + + restore := func(req *restoreFuture) { + // Open the snapshot + meta, source, err := r.snapshots.Open(req.ID) + if err != nil { + req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err)) + return + } + + // Attempt to restore + start := time.Now() + if err := r.fsm.Restore(source); err != nil { + req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err)) + source.Close() + return + } + source.Close() + metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start) + + // Update the last index and term + lastIndex = meta.Index + lastTerm = meta.Term + req.respond(nil) + } + + snapshot := func(req *reqSnapshotFuture) { + // Is there something to snapshot? 
+ if lastIndex == 0 { + req.respond(ErrNothingNewToSnapshot) + return + } + + // Start a snapshot + start := time.Now() + snap, err := r.fsm.Snapshot() + metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start) + + // Respond to the request + req.index = lastIndex + req.term = lastTerm + req.snapshot = snap + req.respond(err) + } + + for { + select { + case ptr := <-r.fsmMutateCh: + switch req := ptr.(type) { + case *commitTuple: + commit(req) + + case *restoreFuture: + restore(req) + + default: + panic(fmt.Errorf("bad type passed to fsmMutateCh: %#v", ptr)) + } + + case req := <-r.fsmSnapshotCh: + snapshot(req) + + case <-r.shutdownCh: + return + } + } +} diff --git a/vendor/github.com/hashicorp/raft/future.go b/vendor/github.com/hashicorp/raft/future.go new file mode 100644 index 0000000..fac59a5 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/future.go @@ -0,0 +1,289 @@ +package raft + +import ( + "fmt" + "io" + "sync" + "time" +) + +// Future is used to represent an action that may occur in the future. +type Future interface { + // Error blocks until the future arrives and then + // returns the error status of the future. + // This may be called any number of times - all + // calls will return the same value. + // Note that it is not OK to call this method + // twice concurrently on the same Future instance. + Error() error +} + +// IndexFuture is used for future actions that can result in a raft log entry +// being created. +type IndexFuture interface { + Future + + // Index holds the index of the newly applied log entry. + // This must not be called until after the Error method has returned. + Index() uint64 +} + +// ApplyFuture is used for Apply and can return the FSM response. +type ApplyFuture interface { + IndexFuture + + // Response returns the FSM response as returned + // by the FSM.Apply method. This must not be called + // until after the Error method has returned. + Response() interface{} +} + +// ConfigurationFuture is used for GetConfiguration and can return the +// latest configuration in use by Raft. +type ConfigurationFuture interface { + IndexFuture + + // Configuration contains the latest configuration. This must + // not be called until after the Error method has returned. + Configuration() Configuration +} + +// SnapshotFuture is used for waiting on a user-triggered snapshot to complete. +type SnapshotFuture interface { + Future + + // Open is a function you can call to access the underlying snapshot and + // its metadata. This must not be called until after the Error method + // has returned. + Open() (*SnapshotMeta, io.ReadCloser, error) +} + +// errorFuture is used to return a static error. +type errorFuture struct { + err error +} + +func (e errorFuture) Error() error { + return e.err +} + +func (e errorFuture) Response() interface{} { + return nil +} + +func (e errorFuture) Index() uint64 { + return 0 +} + +// deferError can be embedded to allow a future +// to provide an error in the future. +type deferError struct { + err error + errCh chan error + responded bool +} + +func (d *deferError) init() { + d.errCh = make(chan error, 1) +} + +func (d *deferError) Error() error { + if d.err != nil { + // Note that when we've received a nil error, this + // won't trigger, but the channel is closed after + // send so we'll still return nil below. 
+ return d.err + } + if d.errCh == nil { + panic("waiting for response on nil channel") + } + d.err = <-d.errCh + return d.err +} + +func (d *deferError) respond(err error) { + if d.errCh == nil { + return + } + if d.responded { + return + } + d.errCh <- err + close(d.errCh) + d.responded = true +} + +// There are several types of requests that cause a configuration entry to +// be appended to the log. These are encoded here for leaderLoop() to process. +// This is internal to a single server. +type configurationChangeFuture struct { + logFuture + req configurationChangeRequest +} + +// bootstrapFuture is used to attempt a live bootstrap of the cluster. See the +// Raft object's BootstrapCluster member function for more details. +type bootstrapFuture struct { + deferError + + // configuration is the proposed bootstrap configuration to apply. + configuration Configuration +} + +// logFuture is used to apply a log entry and waits until +// the log is considered committed. +type logFuture struct { + deferError + log Log + response interface{} + dispatch time.Time +} + +func (l *logFuture) Response() interface{} { + return l.response +} + +func (l *logFuture) Index() uint64 { + return l.log.Index +} + +type shutdownFuture struct { + raft *Raft +} + +func (s *shutdownFuture) Error() error { + if s.raft == nil { + return nil + } + s.raft.waitShutdown() + if closeable, ok := s.raft.trans.(WithClose); ok { + closeable.Close() + } + return nil +} + +// userSnapshotFuture is used for waiting on a user-triggered snapshot to +// complete. +type userSnapshotFuture struct { + deferError + + // opener is a function used to open the snapshot. This is filled in + // once the future returns with no error. + opener func() (*SnapshotMeta, io.ReadCloser, error) +} + +// Open is a function you can call to access the underlying snapshot and its +// metadata. +func (u *userSnapshotFuture) Open() (*SnapshotMeta, io.ReadCloser, error) { + if u.opener == nil { + return nil, nil, fmt.Errorf("no snapshot available") + } else { + // Invalidate the opener so it can't get called multiple times, + // which isn't generally safe. + defer func() { + u.opener = nil + }() + return u.opener() + } +} + +// userRestoreFuture is used for waiting on a user-triggered restore of an +// external snapshot to complete. +type userRestoreFuture struct { + deferError + + // meta is the metadata that belongs with the snapshot. + meta *SnapshotMeta + + // reader is the interface to read the snapshot contents from. + reader io.Reader +} + +// reqSnapshotFuture is used for requesting a snapshot start. +// It is only used internally. +type reqSnapshotFuture struct { + deferError + + // snapshot details provided by the FSM runner before responding + index uint64 + term uint64 + snapshot FSMSnapshot +} + +// restoreFuture is used for requesting an FSM to perform a +// snapshot restore. Used internally only. +type restoreFuture struct { + deferError + ID string +} + +// verifyFuture is used to verify the current node is still +// the leader. This is to prevent a stale read. +type verifyFuture struct { + deferError + notifyCh chan *verifyFuture + quorumSize int + votes int + voteLock sync.Mutex +} + +// configurationsFuture is used to retrieve the current configurations. This is +// used to allow safe access to this information outside of the main thread. +type configurationsFuture struct { + deferError + configurations configurations +} + +// Configuration returns the latest configuration in use by Raft. 
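
Editorial aside: the `FSM` interface earlier in this diff and the `ApplyFuture` contract above come together in application code. Below is a toy, hypothetical counter FSM plus a helper showing how a future returned by `Raft.Apply` is consumed (the `Apply(cmd, timeout)` call is assumed to have its usual signature; it is referenced in the FSM comment but not defined in this section). Everything here is an illustration, not part of the library.

```go
package example

import (
	"fmt"
	"io"
	"time"

	"github.com/hashicorp/raft"
)

// counterFSM is a toy FSM: it counts applied commands and returns the new
// count from Apply, which surfaces in ApplyFuture.Response on the leader.
type counterFSM struct {
	count uint64
}

func (c *counterFSM) Apply(l *raft.Log) interface{} {
	c.count++
	return c.count
}

func (c *counterFSM) Snapshot() (raft.FSMSnapshot, error) {
	return &counterSnapshot{count: c.count}, nil
}

func (c *counterFSM) Restore(rc io.ReadCloser) error {
	defer rc.Close()
	_, err := fmt.Fscan(rc, &c.count)
	return err
}

// counterSnapshot captures the count at Snapshot time, so Persist can run
// concurrently with later Apply calls.
type counterSnapshot struct{ count uint64 }

func (s *counterSnapshot) Persist(sink raft.SnapshotSink) error {
	if _, err := fmt.Fprint(sink, s.count); err != nil {
		sink.Cancel()
		return err
	}
	return sink.Close()
}

func (s *counterSnapshot) Release() {}

// applyOnce shows the future contract: Error blocks until the entry is
// committed (or fails), and only then may Index and Response be read.
func applyOnce(r *raft.Raft, cmd []byte) (uint64, interface{}, error) {
	future := r.Apply(cmd, 5*time.Second)
	if err := future.Error(); err != nil {
		return 0, nil, err
	}
	return future.Index(), future.Response(), nil
}
```
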
+func (c *configurationsFuture) Configuration() Configuration { + return c.configurations.latest +} + +// Index returns the index of the latest configuration in use by Raft. +func (c *configurationsFuture) Index() uint64 { + return c.configurations.latestIndex +} + +// vote is used to respond to a verifyFuture. +// This may block when responding on the notifyCh. +func (v *verifyFuture) vote(leader bool) { + v.voteLock.Lock() + defer v.voteLock.Unlock() + + // Guard against having notified already + if v.notifyCh == nil { + return + } + + if leader { + v.votes++ + if v.votes >= v.quorumSize { + v.notifyCh <- v + v.notifyCh = nil + } + } else { + v.notifyCh <- v + v.notifyCh = nil + } +} + +// appendFuture is used for waiting on a pipelined append +// entries RPC. +type appendFuture struct { + deferError + start time.Time + args *AppendEntriesRequest + resp *AppendEntriesResponse +} + +func (a *appendFuture) Start() time.Time { + return a.start +} + +func (a *appendFuture) Request() *AppendEntriesRequest { + return a.args +} + +func (a *appendFuture) Response() *AppendEntriesResponse { + return a.resp +} diff --git a/vendor/github.com/hashicorp/raft/inmem_snapshot.go b/vendor/github.com/hashicorp/raft/inmem_snapshot.go new file mode 100644 index 0000000..3aa92b3 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/inmem_snapshot.go @@ -0,0 +1,106 @@ +package raft + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "sync" +) + +// InmemSnapshotStore implements the SnapshotStore interface and +// retains only the most recent snapshot +type InmemSnapshotStore struct { + latest *InmemSnapshotSink + hasSnapshot bool + sync.RWMutex +} + +// InmemSnapshotSink implements SnapshotSink in memory +type InmemSnapshotSink struct { + meta SnapshotMeta + contents *bytes.Buffer +} + +// NewInmemSnapshotStore creates a blank new InmemSnapshotStore +func NewInmemSnapshotStore() *InmemSnapshotStore { + return &InmemSnapshotStore{ + latest: &InmemSnapshotSink{ + contents: &bytes.Buffer{}, + }, + } +} + +// Create replaces the stored snapshot with a new one using the given args +func (m *InmemSnapshotStore) Create(version SnapshotVersion, index, term uint64, + configuration Configuration, configurationIndex uint64, trans Transport) (SnapshotSink, error) { + // We only support version 1 snapshots at this time. 
+ if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + name := snapshotName(term, index) + + m.Lock() + defer m.Unlock() + + sink := &InmemSnapshotSink{ + meta: SnapshotMeta{ + Version: version, + ID: name, + Index: index, + Term: term, + Peers: encodePeers(configuration, trans), + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + contents: &bytes.Buffer{}, + } + m.hasSnapshot = true + m.latest = sink + + return sink, nil +} + +// List returns the latest snapshot taken +func (m *InmemSnapshotStore) List() ([]*SnapshotMeta, error) { + m.RLock() + defer m.RUnlock() + + if !m.hasSnapshot { + return []*SnapshotMeta{}, nil + } + return []*SnapshotMeta{&m.latest.meta}, nil +} + +// Open wraps an io.ReadCloser around the snapshot contents +func (m *InmemSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { + m.RLock() + defer m.RUnlock() + + if m.latest.meta.ID != id { + return nil, nil, fmt.Errorf("[ERR] snapshot: failed to open snapshot id: %s", id) + } + + return &m.latest.meta, ioutil.NopCloser(m.latest.contents), nil +} + +// Write appends the given bytes to the snapshot contents +func (s *InmemSnapshotSink) Write(p []byte) (n int, err error) { + written, err := io.Copy(s.contents, bytes.NewReader(p)) + s.meta.Size += written + return int(written), err +} + +// Close updates the Size and is otherwise a no-op +func (s *InmemSnapshotSink) Close() error { + return nil +} + +func (s *InmemSnapshotSink) ID() string { + return s.meta.ID +} + +func (s *InmemSnapshotSink) Cancel() error { + return nil +} diff --git a/vendor/github.com/hashicorp/raft/inmem_store.go b/vendor/github.com/hashicorp/raft/inmem_store.go new file mode 100644 index 0000000..6285610 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/inmem_store.go @@ -0,0 +1,130 @@ +package raft + +import ( + "errors" + "sync" +) + +// InmemStore implements the LogStore and StableStore interface. +// It should NOT EVER be used for production. It is used only for +// unit tests. Use the MDBStore implementation instead. +type InmemStore struct { + l sync.RWMutex + lowIndex uint64 + highIndex uint64 + logs map[uint64]*Log + kv map[string][]byte + kvInt map[string]uint64 +} + +// NewInmemStore returns a new in-memory backend. Do not ever +// use for production. Only for testing. +func NewInmemStore() *InmemStore { + i := &InmemStore{ + logs: make(map[uint64]*Log), + kv: make(map[string][]byte), + kvInt: make(map[string]uint64), + } + return i +} + +// FirstIndex implements the LogStore interface. +func (i *InmemStore) FirstIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.lowIndex, nil +} + +// LastIndex implements the LogStore interface. +func (i *InmemStore) LastIndex() (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.highIndex, nil +} + +// GetLog implements the LogStore interface. +func (i *InmemStore) GetLog(index uint64, log *Log) error { + i.l.RLock() + defer i.l.RUnlock() + l, ok := i.logs[index] + if !ok { + return ErrLogNotFound + } + *log = *l + return nil +} + +// StoreLog implements the LogStore interface. +func (i *InmemStore) StoreLog(log *Log) error { + return i.StoreLogs([]*Log{log}) +} + +// StoreLogs implements the LogStore interface. 
+func (i *InmemStore) StoreLogs(logs []*Log) error { + i.l.Lock() + defer i.l.Unlock() + for _, l := range logs { + i.logs[l.Index] = l + if i.lowIndex == 0 { + i.lowIndex = l.Index + } + if l.Index > i.highIndex { + i.highIndex = l.Index + } + } + return nil +} + +// DeleteRange implements the LogStore interface. +func (i *InmemStore) DeleteRange(min, max uint64) error { + i.l.Lock() + defer i.l.Unlock() + for j := min; j <= max; j++ { + delete(i.logs, j) + } + if min <= i.lowIndex { + i.lowIndex = max + 1 + } + if max >= i.highIndex { + i.highIndex = min - 1 + } + if i.lowIndex > i.highIndex { + i.lowIndex = 0 + i.highIndex = 0 + } + return nil +} + +// Set implements the StableStore interface. +func (i *InmemStore) Set(key []byte, val []byte) error { + i.l.Lock() + defer i.l.Unlock() + i.kv[string(key)] = val + return nil +} + +// Get implements the StableStore interface. +func (i *InmemStore) Get(key []byte) ([]byte, error) { + i.l.RLock() + defer i.l.RUnlock() + val := i.kv[string(key)] + if val == nil { + return nil, errors.New("not found") + } + return val, nil +} + +// SetUint64 implements the StableStore interface. +func (i *InmemStore) SetUint64(key []byte, val uint64) error { + i.l.Lock() + defer i.l.Unlock() + i.kvInt[string(key)] = val + return nil +} + +// GetUint64 implements the StableStore interface. +func (i *InmemStore) GetUint64(key []byte) (uint64, error) { + i.l.RLock() + defer i.l.RUnlock() + return i.kvInt[string(key)], nil +} diff --git a/vendor/github.com/hashicorp/raft/inmem_transport.go b/vendor/github.com/hashicorp/raft/inmem_transport.go new file mode 100644 index 0000000..02a7a0f --- /dev/null +++ b/vendor/github.com/hashicorp/raft/inmem_transport.go @@ -0,0 +1,329 @@ +package raft + +import ( + "fmt" + "io" + "sync" + "time" +) + +// NewInmemAddr returns a new in-memory addr with +// a randomly generate UUID as the ID. +func NewInmemAddr() ServerAddress { + return ServerAddress(generateUUID()) +} + +// inmemPipeline is used to pipeline requests for the in-mem transport. +type inmemPipeline struct { + trans *InmemTransport + peer *InmemTransport + peerAddr ServerAddress + + doneCh chan AppendFuture + inprogressCh chan *inmemPipelineInflight + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +type inmemPipelineInflight struct { + future *appendFuture + respCh <-chan RPCResponse +} + +// InmemTransport Implements the Transport interface, to allow Raft to be +// tested in-memory without going over a network. +type InmemTransport struct { + sync.RWMutex + consumerCh chan RPC + localAddr ServerAddress + peers map[ServerAddress]*InmemTransport + pipelines []*inmemPipeline + timeout time.Duration +} + +// NewInmemTransportWithTimeout is used to initialize a new transport and +// generates a random local address if none is specified. The given timeout +// will be used to decide how long to wait for a connected peer to process the +// RPCs that we're sending it. See also Connect() and Consumer(). 
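
Editorial aside: a minimal sketch of the `InmemStore` semantics shown above, storing a few entries, reading the tracked index range, and then using the inclusive `DeleteRange`. The log contents are invented, and as the type comment stresses, this store is for tests only.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func main() {
	store := raft.NewInmemStore()

	// Store three entries, then inspect the tracked index range.
	for i := uint64(1); i <= 3; i++ {
		if err := store.StoreLog(&raft.Log{Index: i, Term: 1, Type: raft.LogCommand, Data: []byte("cmd")}); err != nil {
			panic(err)
		}
	}
	first, _ := store.FirstIndex()
	last, _ := store.LastIndex()
	fmt.Println(first, last) // 1 3

	// DeleteRange is inclusive; dropping 1-2 moves the low index forward.
	if err := store.DeleteRange(1, 2); err != nil {
		panic(err)
	}
	first, _ = store.FirstIndex()
	fmt.Println(first) // 3

	var out raft.Log
	if err := store.GetLog(3, &out); err != nil {
		panic(err)
	}
	fmt.Println(string(out.Data)) // cmd
}
```
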
+func NewInmemTransportWithTimeout(addr ServerAddress, timeout time.Duration) (ServerAddress, *InmemTransport) { + if string(addr) == "" { + addr = NewInmemAddr() + } + trans := &InmemTransport{ + consumerCh: make(chan RPC, 16), + localAddr: addr, + peers: make(map[ServerAddress]*InmemTransport), + timeout: timeout, + } + return addr, trans +} + +// NewInmemTransport is used to initialize a new transport +// and generates a random local address if none is specified +func NewInmemTransport(addr ServerAddress) (ServerAddress, *InmemTransport) { + return NewInmemTransportWithTimeout(addr, 50*time.Millisecond) +} + +// SetHeartbeatHandler is used to set optional fast-path for +// heartbeats, not supported for this transport. +func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) { +} + +// Consumer implements the Transport interface. +func (i *InmemTransport) Consumer() <-chan RPC { + return i.consumerCh +} + +// LocalAddr implements the Transport interface. +func (i *InmemTransport) LocalAddr() ServerAddress { + return i.localAddr +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (i *InmemTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) { + i.Lock() + defer i.Unlock() + + peer, ok := i.peers[target] + if !ok { + return nil, fmt.Errorf("failed to connect to peer: %v", target) + } + pipeline := newInmemPipeline(i, peer, target) + i.pipelines = append(i.pipelines, pipeline) + return pipeline, nil +} + +// AppendEntries implements the Transport interface. +func (i *InmemTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*AppendEntriesResponse) + *resp = *out + return nil +} + +// RequestVote implements the Transport interface. +func (i *InmemTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*RequestVoteResponse) + *resp = *out + return nil +} + +// InstallSnapshot implements the Transport interface. +func (i *InmemTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*InstallSnapshotResponse) + *resp = *out + return nil +} + +func (i *InmemTransport) makeRPC(target ServerAddress, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { + i.RLock() + peer, ok := i.peers[target] + i.RUnlock() + + if !ok { + err = fmt.Errorf("failed to connect to peer: %v", target) + return + } + + // Send the RPC over + respCh := make(chan RPCResponse) + peer.consumerCh <- RPC{ + Command: args, + Reader: r, + RespChan: respCh, + } + + // Wait for a response + select { + case rpcResp = <-respCh: + if rpcResp.Error != nil { + err = rpcResp.Error + } + case <-time.After(timeout): + err = fmt.Errorf("command timed out") + } + return +} + +// EncodePeer implements the Transport interface. 
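
Editorial aside: a sketch of wiring two `InmemTransport` instances together for a test, using `Connect` (defined just below) for local routing and a goroutine draining `Consumer()` to play the remote side. Zero-value `RequestVoteRequest`/`RequestVoteResponse` structs are used purely to show the plumbing; this is not how the library itself drives the transport.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func main() {
	// Two in-memory transports with generated addresses.
	addr1, t1 := raft.NewInmemTransport("")
	addr2, t2 := raft.NewInmemTransport("")

	// Connect wires up local routing in both directions.
	t1.Connect(addr2, t2)
	t2.Connect(addr1, t1)

	// The "remote" side consumes RPCs from its channel and replies.
	go func() {
		rpc := <-t2.Consumer()
		rpc.RespChan <- raft.RPCResponse{Response: &raft.RequestVoteResponse{}}
	}()

	var resp raft.RequestVoteResponse
	if err := t1.RequestVote("server-2", addr2, &raft.RequestVoteRequest{}, &resp); err != nil {
		panic(err)
	}
	fmt.Println("round trip OK via", addr2)
}
```
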
+func (i *InmemTransport) EncodePeer(id ServerID, p ServerAddress) []byte { + return []byte(p) +} + +// DecodePeer implements the Transport interface. +func (i *InmemTransport) DecodePeer(buf []byte) ServerAddress { + return ServerAddress(buf) +} + +// Connect is used to connect this transport to another transport for +// a given peer name. This allows for local routing. +func (i *InmemTransport) Connect(peer ServerAddress, t Transport) { + trans := t.(*InmemTransport) + i.Lock() + defer i.Unlock() + i.peers[peer] = trans +} + +// Disconnect is used to remove the ability to route to a given peer. +func (i *InmemTransport) Disconnect(peer ServerAddress) { + i.Lock() + defer i.Unlock() + delete(i.peers, peer) + + // Disconnect any pipelines + n := len(i.pipelines) + for idx := 0; idx < n; idx++ { + if i.pipelines[idx].peerAddr == peer { + i.pipelines[idx].Close() + i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil + idx-- + n-- + } + } + i.pipelines = i.pipelines[:n] +} + +// DisconnectAll is used to remove all routes to peers. +func (i *InmemTransport) DisconnectAll() { + i.Lock() + defer i.Unlock() + i.peers = make(map[ServerAddress]*InmemTransport) + + // Handle pipelines + for _, pipeline := range i.pipelines { + pipeline.Close() + } + i.pipelines = nil +} + +// Close is used to permanently disable the transport +func (i *InmemTransport) Close() error { + i.DisconnectAll() + return nil +} + +func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr ServerAddress) *inmemPipeline { + i := &inmemPipeline{ + trans: trans, + peer: peer, + peerAddr: addr, + doneCh: make(chan AppendFuture, 16), + inprogressCh: make(chan *inmemPipelineInflight, 16), + shutdownCh: make(chan struct{}), + } + go i.decodeResponses() + return i +} + +func (i *inmemPipeline) decodeResponses() { + timeout := i.trans.timeout + for { + select { + case inp := <-i.inprogressCh: + var timeoutCh <-chan time.Time + if timeout > 0 { + timeoutCh = time.After(timeout) + } + + select { + case rpcResp := <-inp.respCh: + // Copy the result back + *inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse) + inp.future.respond(rpcResp.Error) + + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-timeoutCh: + inp.future.respond(fmt.Errorf("command timed out")) + select { + case i.doneCh <- inp.future: + case <-i.shutdownCh: + return + } + + case <-i.shutdownCh: + return + } + case <-i.shutdownCh: + return + } + } +} + +func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Handle a timeout + var timeout <-chan time.Time + if i.trans.timeout > 0 { + timeout = time.After(i.trans.timeout) + } + + // Send the RPC over + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + Command: args, + RespChan: respCh, + } + select { + case i.peer.consumerCh <- rpc: + case <-timeout: + return nil, fmt.Errorf("command enqueue timeout") + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } + + // Send to be decoded + select { + case i.inprogressCh <- &inmemPipelineInflight{future, respCh}: + return future, nil + case <-i.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +func (i *inmemPipeline) Consumer() <-chan AppendFuture { + return i.doneCh +} + +func (i *inmemPipeline) Close() error { + i.shutdownLock.Lock() + defer i.shutdownLock.Unlock() + if i.shutdown { + return nil + } + + i.shutdown 
= true + close(i.shutdownCh) + return nil +} diff --git a/vendor/github.com/hashicorp/raft/log.go b/vendor/github.com/hashicorp/raft/log.go new file mode 100644 index 0000000..4ade38e --- /dev/null +++ b/vendor/github.com/hashicorp/raft/log.go @@ -0,0 +1,72 @@ +package raft + +// LogType describes various types of log entries. +type LogType uint8 + +const ( + // LogCommand is applied to a user FSM. + LogCommand LogType = iota + + // LogNoop is used to assert leadership. + LogNoop + + // LogAddPeer is used to add a new peer. This should only be used with + // older protocol versions designed to be compatible with unversioned + // Raft servers. See comments in config.go for details. + LogAddPeerDeprecated + + // LogRemovePeer is used to remove an existing peer. This should only be + // used with older protocol versions designed to be compatible with + // unversioned Raft servers. See comments in config.go for details. + LogRemovePeerDeprecated + + // LogBarrier is used to ensure all preceding operations have been + // applied to the FSM. It is similar to LogNoop, but instead of returning + // once committed, it only returns once the FSM manager acks it. Otherwise + // it is possible there are operations committed but not yet applied to + // the FSM. + LogBarrier + + // LogConfiguration establishes a membership change configuration. It is + // created when a server is added, removed, promoted, etc. Only used + // when protocol version 1 or greater is in use. + LogConfiguration +) + +// Log entries are replicated to all members of the Raft cluster +// and form the heart of the replicated state machine. +type Log struct { + // Index holds the index of the log entry. + Index uint64 + + // Term holds the election term of the log entry. + Term uint64 + + // Type holds the type of the log entry. + Type LogType + + // Data holds the log entry's type-specific data. + Data []byte +} + +// LogStore is used to provide an interface for storing +// and retrieving logs in a durable fashion. +type LogStore interface { + // FirstIndex returns the first index written. 0 for no entries. + FirstIndex() (uint64, error) + + // LastIndex returns the last index written. 0 for no entries. + LastIndex() (uint64, error) + + // GetLog gets a log entry at a given index. + GetLog(index uint64, log *Log) error + + // StoreLog stores a log entry. + StoreLog(log *Log) error + + // StoreLogs stores multiple log entries. + StoreLogs(logs []*Log) error + + // DeleteRange deletes a range of log entries. The range is inclusive. + DeleteRange(min, max uint64) error +} diff --git a/vendor/github.com/hashicorp/raft/log_cache.go b/vendor/github.com/hashicorp/raft/log_cache.go new file mode 100644 index 0000000..952e98c --- /dev/null +++ b/vendor/github.com/hashicorp/raft/log_cache.go @@ -0,0 +1,79 @@ +package raft + +import ( + "fmt" + "sync" +) + +// LogCache wraps any LogStore implementation to provide an +// in-memory ring buffer. This is used to cache access to +// the recently written entries. For implementations that do not +// cache themselves, this can provide a substantial boost by +// avoiding disk I/O on recent entries. +type LogCache struct { + store LogStore + + cache []*Log + l sync.RWMutex +} + +// NewLogCache is used to create a new LogCache with the +// given capacity and backend store. 
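
Editorial aside: a sketch of wrapping a `LogStore` with the `LogCache` described above; here the backing store is the test-only `InmemStore`, and the capacity and entry values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func main() {
	// Wrap the in-memory store with a 128-entry ring-buffer cache.
	store := raft.NewInmemStore()
	cached, err := raft.NewLogCache(128, store)
	if err != nil {
		panic(err)
	}

	// Writes go through the cache into the underlying store.
	if err := cached.StoreLog(&raft.Log{Index: 1, Term: 1, Type: raft.LogNoop}); err != nil {
		panic(err)
	}

	// Recent entries are served from the ring buffer; older or evicted
	// entries fall through to the backing LogStore.
	var out raft.Log
	if err := cached.GetLog(1, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Index, out.Term) // 1 1
}
```
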
+func NewLogCache(capacity int, store LogStore) (*LogCache, error) { + if capacity <= 0 { + return nil, fmt.Errorf("capacity must be positive") + } + c := &LogCache{ + store: store, + cache: make([]*Log, capacity), + } + return c, nil +} + +func (c *LogCache) GetLog(idx uint64, log *Log) error { + // Check the buffer for an entry + c.l.RLock() + cached := c.cache[idx%uint64(len(c.cache))] + c.l.RUnlock() + + // Check if entry is valid + if cached != nil && cached.Index == idx { + *log = *cached + return nil + } + + // Forward request on cache miss + return c.store.GetLog(idx, log) +} + +func (c *LogCache) StoreLog(log *Log) error { + return c.StoreLogs([]*Log{log}) +} + +func (c *LogCache) StoreLogs(logs []*Log) error { + // Insert the logs into the ring buffer + c.l.Lock() + for _, l := range logs { + c.cache[l.Index%uint64(len(c.cache))] = l + } + c.l.Unlock() + + return c.store.StoreLogs(logs) +} + +func (c *LogCache) FirstIndex() (uint64, error) { + return c.store.FirstIndex() +} + +func (c *LogCache) LastIndex() (uint64, error) { + return c.store.LastIndex() +} + +func (c *LogCache) DeleteRange(min, max uint64) error { + // Invalidate the cache on deletes + c.l.Lock() + c.cache = make([]*Log, len(c.cache)) + c.l.Unlock() + + return c.store.DeleteRange(min, max) +} diff --git a/vendor/github.com/hashicorp/raft/membership.md b/vendor/github.com/hashicorp/raft/membership.md new file mode 100644 index 0000000..df1f83e --- /dev/null +++ b/vendor/github.com/hashicorp/raft/membership.md @@ -0,0 +1,83 @@ +Simon (@superfell) and I (@ongardie) talked through reworking this library's cluster membership changes last Friday. We don't see a way to split this into independent patches, so we're taking the next best approach: submitting the plan here for review, then working on an enormous PR. Your feedback would be appreciated. (@superfell is out this week, however, so don't expect him to respond quickly.) + +These are the main goals: + - Bringing things in line with the description in my PhD dissertation; + - Catching up new servers prior to granting them a vote, as well as allowing permanent non-voting members; and + - Eliminating the `peers.json` file, to avoid issues of consistency between that and the log/snapshot. + +## Data-centric view + +We propose to re-define a *configuration* as a set of servers, where each server includes an address (as it does today) and a mode that is either: + - *Voter*: a server whose vote is counted in elections and whose match index is used in advancing the leader's commit index. + - *Nonvoter*: a server that receives log entries but is not considered for elections or commitment purposes. + - *Staging*: a server that acts like a nonvoter with one exception: once a staging server receives enough log entries to catch up sufficiently to the leader's log, the leader will invoke a membership change to change the staging server to a voter. + +All changes to the configuration will be done by writing a new configuration to the log. The new configuration will be in affect as soon as it is appended to the log (not when it is committed like a normal state machine command). Note that, per my dissertation, there can be at most one uncommitted configuration at a time (the next configuration may not be created until the prior one has been committed). It's not strictly necessary to follow these same rules for the nonvoter/staging servers, but we think its best to treat all changes uniformly. + +Each server will track two configurations: + 1. 
its *committed configuration*: the latest configuration in the log/snapshot that has been committed, along with its index. + 2. its *latest configuration*: the latest configuration in the log/snapshot (may be committed or uncommitted), along with its index. + +When there's no membership change happening, these two will be the same. The latest configuration is almost always the one used, except: + - When followers truncate the suffix of their logs, they may need to fall back to the committed configuration. + - When snapshotting, the committed configuration is written, to correspond with the committed log prefix that is being snapshotted. + + +## Application API + +We propose the following operations for clients to manipulate the cluster configuration: + - AddVoter: server becomes staging unless voter, + - AddNonvoter: server becomes nonvoter unless staging or voter, + - DemoteVoter: server becomes nonvoter unless absent, + - RemovePeer: server removed from configuration, + - GetConfiguration: waits for latest config to commit, returns committed config. + +This diagram, of which I'm quite proud, shows the possible transitions: +``` ++-----------------------------------------------------------------------------+ +| | +| Start -> +--------+ | +| ,------<------------| | | +| / | absent | | +| / RemovePeer--> | | <---RemovePeer | +| / | +--------+ \ | +| / | | \ | +| AddNonvoter | AddVoter \ | +| | ,->---' `--<-. | \ | +| v / \ v \ | +| +----------+ +----------+ +----------+ | +| | | ---AddVoter--> | | -log caught up --> | | | +| | nonvoter | | staging | | voter | | +| | | <-DemoteVoter- | | ,- | | | +| +----------+ \ +----------+ / +----------+ | +| \ / | +| `--------------<---------------' | +| | ++-----------------------------------------------------------------------------+ +``` + +While these operations aren't quite symmetric, we think they're a good set to capture +the possible intent of the user. For example, if I want to make sure a server doesn't have a vote, but the server isn't part of the configuration at all, it probably shouldn't be added as a nonvoting server. + +Each of these application-level operations will be interpreted by the leader and, if it has an effect, will cause the leader to write a new configuration entry to its log. Which particular application-level operation caused the log entry to be written need not be part of the log entry. + +## Code implications + +This is a non-exhaustive list, but we came up with a few things: +- Remove the PeerStore: the `peers.json` file introduces the possibility of getting out of sync with the log and snapshot, and it's hard to maintain this atomically as the log changes. It's not clear whether it's meant to track the committed or latest configuration, either. +- Servers will have to search their snapshot and log to find the committed configuration and the latest configuration on startup. +- Bootstrap will no longer use `peers.json` but should initialize the log or snapshot with an application-provided configuration entry. +- Snapshots should store the index of their configuration along with the configuration itself. In my experience with LogCabin, the original log index of the configuration is very useful to include in debug log messages. +- As noted in hashicorp/raft#84, configuration change requests should come in via a separate channel, and one may not proceed until the last has been committed. 
+- As to deciding when a log is sufficiently caught up, implementing a sophisticated algorithm *is* something that can be done in a separate PR. An easy and decent placeholder is: once the staging server has reached 95% of the leader's commit index, promote it. + +## Feedback + +Again, we're looking for feedback here before we start working on this. Here are some questions to think about: + - Does this seem like where we want things to go? + - Is there anything here that should be left out? + - Is there anything else we're forgetting about? + - Is there a good way to break this up? + - What do we need to worry about in terms of backwards compatibility? + - What implication will this have on current tests? + - What's the best way to test this code, in particular the small changes that will be sprinkled all over the library? diff --git a/vendor/github.com/hashicorp/raft/net_transport.go b/vendor/github.com/hashicorp/raft/net_transport.go new file mode 100644 index 0000000..4f1f101 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/net_transport.go @@ -0,0 +1,757 @@ +package raft + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "log" + "net" + "os" + "sync" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +const ( + rpcAppendEntries uint8 = iota + rpcRequestVote + rpcInstallSnapshot + + // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. + DefaultTimeoutScale = 256 * 1024 // 256KB + + // rpcMaxPipeline controls the maximum number of outstanding + // AppendEntries RPC calls. + rpcMaxPipeline = 128 +) + +var ( + // ErrTransportShutdown is returned when operations on a transport are + // invoked after it's been terminated. + ErrTransportShutdown = errors.New("transport shutdown") + + // ErrPipelineShutdown is returned when the pipeline is closed. + ErrPipelineShutdown = errors.New("append pipeline closed") +) + +/* + +NetworkTransport provides a network based transport that can be +used to communicate with Raft on remote machines. It requires +an underlying stream layer to provide a stream abstraction, which can +be simple TCP, TLS, etc. + +This transport is very simple and lightweight. Each RPC request is +framed by sending a byte that indicates the message type, followed +by the MsgPack encoded request. + +The response is an error string followed by the response object, +both are encoded using MsgPack. + +InstallSnapshot is special, in that after the RPC request we stream +the entire state. That socket is not re-used as the connection state +is not known if there is an error. + +*/ +type NetworkTransport struct { + connPool map[ServerAddress][]*netConn + connPoolLock sync.Mutex + + consumeCh chan RPC + + heartbeatFn func(RPC) + heartbeatFnLock sync.Mutex + + logger *log.Logger + + maxPool int + + serverAddressProvider ServerAddressProvider + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex + + stream StreamLayer + + // streamCtx is used to cancel existing connection handlers. + streamCtx context.Context + streamCancel context.CancelFunc + streamCtxLock sync.RWMutex + + timeout time.Duration + TimeoutScale int +} + +// NetworkTransportConfig encapsulates configuration for the network transport layer. 
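
Editorial aside: the comment above describes the wire framing as one message-type byte followed by the MsgPack-encoded request. The sketch below is a hypothetical helper that mirrors that description, it is not the library's internal `sendRPC`, and the literal `0` stands in for the unexported `rpcAppendEntries` constant.

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
	"github.com/hashicorp/raft"
)

// frameRequest writes one type byte, then the MsgPack-encoded request body,
// matching the framing described in the NetworkTransport comment.
func frameRequest(rpcType uint8, req interface{}) ([]byte, error) {
	var buf bytes.Buffer
	w := bufio.NewWriter(&buf)
	if err := w.WriteByte(rpcType); err != nil {
		return nil, err
	}
	enc := codec.NewEncoder(w, &codec.MsgpackHandle{})
	if err := enc.Encode(req); err != nil {
		return nil, err
	}
	if err := w.Flush(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	frame, err := frameRequest(0 /* rpcAppendEntries */, &raft.AppendEntriesRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("frame starts with type byte %d, total %d bytes\n", frame[0], len(frame))
}
```
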
+type NetworkTransportConfig struct { + // ServerAddressProvider is used to override the target address when establishing a connection to invoke an RPC + ServerAddressProvider ServerAddressProvider + + Logger *log.Logger + + // Dialer + Stream StreamLayer + + // MaxPool controls how many connections we will pool + MaxPool int + + // Timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply + // the timeout by (SnapshotSize / TimeoutScale). + Timeout time.Duration +} + +type ServerAddressProvider interface { + ServerAddr(id ServerID) (ServerAddress, error) +} + +// StreamLayer is used with the NetworkTransport to provide +// the low level stream abstraction. +type StreamLayer interface { + net.Listener + + // Dial is used to create a new outgoing connection + Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) +} + +type netConn struct { + target ServerAddress + conn net.Conn + r *bufio.Reader + w *bufio.Writer + dec *codec.Decoder + enc *codec.Encoder +} + +func (n *netConn) Release() error { + return n.conn.Close() +} + +type netPipeline struct { + conn *netConn + trans *NetworkTransport + + doneCh chan AppendFuture + inprogressCh chan *appendFuture + + shutdown bool + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// NewNetworkTransportWithConfig creates a new network transport with the given config struct +func NewNetworkTransportWithConfig( + config *NetworkTransportConfig, +) *NetworkTransport { + if config.Logger == nil { + config.Logger = log.New(os.Stderr, "", log.LstdFlags) + } + trans := &NetworkTransport{ + connPool: make(map[ServerAddress][]*netConn), + consumeCh: make(chan RPC), + logger: config.Logger, + maxPool: config.MaxPool, + shutdownCh: make(chan struct{}), + stream: config.Stream, + timeout: config.Timeout, + TimeoutScale: DefaultTimeoutScale, + serverAddressProvider: config.ServerAddressProvider, + } + + // Create the connection context and then start our listener. + trans.setupStreamContext() + go trans.listen() + + return trans +} + +// NewNetworkTransport creates a new network transport with the given dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). +func NewNetworkTransport( + stream StreamLayer, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) *NetworkTransport { + if logOutput == nil { + logOutput = os.Stderr + } + logger := log.New(logOutput, "", log.LstdFlags) + config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger} + return NewNetworkTransportWithConfig(config) +} + +// NewNetworkTransportWithLogger creates a new network transport with the given logger, dialer +// and listener. The maxPool controls how many connections we will pool. The +// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply +// the timeout by (SnapshotSize / TimeoutScale). +func NewNetworkTransportWithLogger( + stream StreamLayer, + maxPool int, + timeout time.Duration, + logger *log.Logger, +) *NetworkTransport { + config := &NetworkTransportConfig{Stream: stream, MaxPool: maxPool, Timeout: timeout, Logger: logger} + return NewNetworkTransportWithConfig(config) +} + +// setupStreamContext is used to create a new stream context. This should be +// called with the stream lock held. 
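+// (The constructor is the one caller that skips the lock: it runs before the
+// transport is shared with other goroutines.)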
+func (n *NetworkTransport) setupStreamContext() { + ctx, cancel := context.WithCancel(context.Background()) + n.streamCtx = ctx + n.streamCancel = cancel +} + +// getStreamContext is used retrieve the current stream context. +func (n *NetworkTransport) getStreamContext() context.Context { + n.streamCtxLock.RLock() + defer n.streamCtxLock.RUnlock() + return n.streamCtx +} + +// SetHeartbeatHandler is used to setup a heartbeat handler +// as a fast-pass. This is to avoid head-of-line blocking from +// disk IO. +func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) { + n.heartbeatFnLock.Lock() + defer n.heartbeatFnLock.Unlock() + n.heartbeatFn = cb +} + +// CloseStreams closes the current streams. +func (n *NetworkTransport) CloseStreams() { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + // Close all the connections in the connection pool and then remove their + // entry. + for k, e := range n.connPool { + for _, conn := range e { + conn.Release() + } + + delete(n.connPool, k) + } + + // Cancel the existing connections and create a new context. Both these + // operations must always be done with the lock held otherwise we can create + // connection handlers that are holding a context that will never be + // cancelable. + n.streamCtxLock.Lock() + n.streamCancel() + n.setupStreamContext() + n.streamCtxLock.Unlock() +} + +// Close is used to stop the network transport. +func (n *NetworkTransport) Close() error { + n.shutdownLock.Lock() + defer n.shutdownLock.Unlock() + + if !n.shutdown { + close(n.shutdownCh) + n.stream.Close() + n.shutdown = true + } + return nil +} + +// Consumer implements the Transport interface. +func (n *NetworkTransport) Consumer() <-chan RPC { + return n.consumeCh +} + +// LocalAddr implements the Transport interface. +func (n *NetworkTransport) LocalAddr() ServerAddress { + return ServerAddress(n.stream.Addr().String()) +} + +// IsShutdown is used to check if the transport is shutdown. +func (n *NetworkTransport) IsShutdown() bool { + select { + case <-n.shutdownCh: + return true + default: + return false + } +} + +// getExistingConn is used to grab a pooled connection. +func (n *NetworkTransport) getPooledConn(target ServerAddress) *netConn { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + conns, ok := n.connPool[target] + if !ok || len(conns) == 0 { + return nil + } + + var conn *netConn + num := len(conns) + conn, conns[num-1] = conns[num-1], nil + n.connPool[target] = conns[:num-1] + return conn +} + +// getConnFromAddressProvider returns a connection from the server address provider if available, or defaults to a connection using the target server address +func (n *NetworkTransport) getConnFromAddressProvider(id ServerID, target ServerAddress) (*netConn, error) { + address := n.getProviderAddressOrFallback(id, target) + return n.getConn(address) +} + +func (n *NetworkTransport) getProviderAddressOrFallback(id ServerID, target ServerAddress) ServerAddress { + if n.serverAddressProvider != nil { + serverAddressOverride, err := n.serverAddressProvider.ServerAddr(id) + if err != nil { + n.logger.Printf("[WARN] raft: Unable to get address for server id %v, using fallback address %v: %v", id, target, err) + } else { + return serverAddressOverride + } + } + return target +} + +// getConn is used to get a connection from the pool. 
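+// When the pool is empty, a new connection is dialed through the stream layer
+// and wrapped with buffered msgpack encoding and decoding.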
+func (n *NetworkTransport) getConn(target ServerAddress) (*netConn, error) { + // Check for a pooled conn + if conn := n.getPooledConn(target); conn != nil { + return conn, nil + } + + // Dial a new connection + conn, err := n.stream.Dial(target, n.timeout) + if err != nil { + return nil, err + } + + // Wrap the conn + netConn := &netConn{ + target: target, + conn: conn, + r: bufio.NewReader(conn), + w: bufio.NewWriter(conn), + } + + // Setup encoder/decoders + netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{}) + netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{}) + + // Done + return netConn, nil +} + +// returnConn returns a connection back to the pool. +func (n *NetworkTransport) returnConn(conn *netConn) { + n.connPoolLock.Lock() + defer n.connPoolLock.Unlock() + + key := conn.target + conns, _ := n.connPool[key] + + if !n.IsShutdown() && len(conns) < n.maxPool { + n.connPool[key] = append(conns, conn) + } else { + conn.Release() + } +} + +// AppendEntriesPipeline returns an interface that can be used to pipeline +// AppendEntries requests. +func (n *NetworkTransport) AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) { + // Get a connection + conn, err := n.getConnFromAddressProvider(id, target) + if err != nil { + return nil, err + } + + // Create the pipeline + return newNetPipeline(n, conn), nil +} + +// AppendEntries implements the Transport interface. +func (n *NetworkTransport) AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { + return n.genericRPC(id, target, rpcAppendEntries, args, resp) +} + +// RequestVote implements the Transport interface. +func (n *NetworkTransport) RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error { + return n.genericRPC(id, target, rpcRequestVote, args, resp) +} + +// genericRPC handles a simple request/response RPC. +func (n *NetworkTransport) genericRPC(id ServerID, target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error { + // Get a conn + conn, err := n.getConnFromAddressProvider(id, target) + if err != nil { + return err + } + + // Set a deadline + if n.timeout > 0 { + conn.conn.SetDeadline(time.Now().Add(n.timeout)) + } + + // Send the RPC + if err = sendRPC(conn, rpcType, args); err != nil { + return err + } + + // Decode the response + canReturn, err := decodeResponse(conn, resp) + if canReturn { + n.returnConn(conn) + } + return err +} + +// InstallSnapshot implements the Transport interface. 
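+// The connection is always released rather than returned to the pool, since
+// its state is unknown after streaming the snapshot data (see the
+// NetworkTransport comment above).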
+func (n *NetworkTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { + // Get a conn, always close for InstallSnapshot + conn, err := n.getConnFromAddressProvider(id, target) + if err != nil { + return err + } + defer conn.Release() + + // Set a deadline, scaled by request size + if n.timeout > 0 { + timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale)) + if timeout < n.timeout { + timeout = n.timeout + } + conn.conn.SetDeadline(time.Now().Add(timeout)) + } + + // Send the RPC + if err = sendRPC(conn, rpcInstallSnapshot, args); err != nil { + return err + } + + // Stream the state + if _, err = io.Copy(conn.w, data); err != nil { + return err + } + + // Flush + if err = conn.w.Flush(); err != nil { + return err + } + + // Decode the response, do not return conn + _, err = decodeResponse(conn, resp) + return err +} + +// EncodePeer implements the Transport interface. +func (n *NetworkTransport) EncodePeer(id ServerID, p ServerAddress) []byte { + address := n.getProviderAddressOrFallback(id, p) + return []byte(address) +} + +// DecodePeer implements the Transport interface. +func (n *NetworkTransport) DecodePeer(buf []byte) ServerAddress { + return ServerAddress(buf) +} + +// listen is used to handling incoming connections. +func (n *NetworkTransport) listen() { + const baseDelay = 5 * time.Millisecond + const maxDelay = 1 * time.Second + + var loopDelay time.Duration + for { + // Accept incoming connections + conn, err := n.stream.Accept() + if err != nil { + if loopDelay == 0 { + loopDelay = baseDelay + } else { + loopDelay *= 2 + } + + if loopDelay > maxDelay { + loopDelay = maxDelay + } + + if !n.IsShutdown() { + n.logger.Printf("[ERR] raft-net: Failed to accept connection: %v", err) + } + + select { + case <-n.shutdownCh: + return + case <-time.After(loopDelay): + continue + } + } + // No error, reset loop delay + loopDelay = 0 + + n.logger.Printf("[DEBUG] raft-net: %v accepted connection from: %v", n.LocalAddr(), conn.RemoteAddr()) + + // Handle the connection in dedicated routine + go n.handleConn(n.getStreamContext(), conn) + } +} + +// handleConn is used to handle an inbound connection for its lifespan. The +// handler will exit when the passed context is cancelled or the connection is +// closed. +func (n *NetworkTransport) handleConn(connCtx context.Context, conn net.Conn) { + defer conn.Close() + r := bufio.NewReader(conn) + w := bufio.NewWriter(conn) + dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) + enc := codec.NewEncoder(w, &codec.MsgpackHandle{}) + + for { + select { + case <-connCtx.Done(): + n.logger.Println("[DEBUG] raft-net: stream layer is closed") + return + default: + } + + if err := n.handleCommand(r, dec, enc); err != nil { + if err != io.EOF { + n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err) + } + return + } + if err := w.Flush(); err != nil { + n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err) + return + } + } +} + +// handleCommand is used to decode and dispatch a single command. 
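+// AppendEntries requests that look like heartbeats (no entries, zero
+// previous-log and leader-commit indexes) are routed to the handler installed
+// via SetHeartbeatHandler, when present, instead of the consume channel.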
+func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error { + // Get the rpc type + rpcType, err := r.ReadByte() + if err != nil { + return err + } + + // Create the RPC object + respCh := make(chan RPCResponse, 1) + rpc := RPC{ + RespChan: respCh, + } + + // Decode the command + isHeartbeat := false + switch rpcType { + case rpcAppendEntries: + var req AppendEntriesRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + // Check if this is a heartbeat + if req.Term != 0 && req.Leader != nil && + req.PrevLogEntry == 0 && req.PrevLogTerm == 0 && + len(req.Entries) == 0 && req.LeaderCommitIndex == 0 { + isHeartbeat = true + } + + case rpcRequestVote: + var req RequestVoteRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + + case rpcInstallSnapshot: + var req InstallSnapshotRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + rpc.Reader = io.LimitReader(r, req.Size) + + default: + return fmt.Errorf("unknown rpc type %d", rpcType) + } + + // Check for heartbeat fast-path + if isHeartbeat { + n.heartbeatFnLock.Lock() + fn := n.heartbeatFn + n.heartbeatFnLock.Unlock() + if fn != nil { + fn(rpc) + goto RESP + } + } + + // Dispatch the RPC + select { + case n.consumeCh <- rpc: + case <-n.shutdownCh: + return ErrTransportShutdown + } + + // Wait for response +RESP: + select { + case resp := <-respCh: + // Send the error first + respErr := "" + if resp.Error != nil { + respErr = resp.Error.Error() + } + if err := enc.Encode(respErr); err != nil { + return err + } + + // Send the response + if err := enc.Encode(resp.Response); err != nil { + return err + } + case <-n.shutdownCh: + return ErrTransportShutdown + } + return nil +} + +// decodeResponse is used to decode an RPC response and reports whether +// the connection can be reused. +func decodeResponse(conn *netConn, resp interface{}) (bool, error) { + // Decode the error if any + var rpcError string + if err := conn.dec.Decode(&rpcError); err != nil { + conn.Release() + return false, err + } + + // Decode the response + if err := conn.dec.Decode(resp); err != nil { + conn.Release() + return false, err + } + + // Format an error if any + if rpcError != "" { + return true, fmt.Errorf(rpcError) + } + return true, nil +} + +// sendRPC is used to encode and send the RPC. +func sendRPC(conn *netConn, rpcType uint8, args interface{}) error { + // Write the request type + if err := conn.w.WriteByte(rpcType); err != nil { + conn.Release() + return err + } + + // Send the request + if err := conn.enc.Encode(args); err != nil { + conn.Release() + return err + } + + // Flush + if err := conn.w.Flush(); err != nil { + conn.Release() + return err + } + return nil +} + +// newNetPipeline is used to construct a netPipeline from a given +// transport and connection. +func newNetPipeline(trans *NetworkTransport, conn *netConn) *netPipeline { + n := &netPipeline{ + conn: conn, + trans: trans, + doneCh: make(chan AppendFuture, rpcMaxPipeline), + inprogressCh: make(chan *appendFuture, rpcMaxPipeline), + shutdownCh: make(chan struct{}), + } + go n.decodeResponses() + return n +} + +// decodeResponses is a long running routine that decodes the responses +// sent on the connection. 
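+// Futures arrive on inprogressCh in the order their requests were sent, so
+// responses are matched to requests purely by ordering.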
+func (n *netPipeline) decodeResponses() { + timeout := n.trans.timeout + for { + select { + case future := <-n.inprogressCh: + if timeout > 0 { + n.conn.conn.SetReadDeadline(time.Now().Add(timeout)) + } + + _, err := decodeResponse(n.conn, future.resp) + future.respond(err) + select { + case n.doneCh <- future: + case <-n.shutdownCh: + return + } + case <-n.shutdownCh: + return + } + } +} + +// AppendEntries is used to pipeline a new append entries request. +func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { + // Create a new future + future := &appendFuture{ + start: time.Now(), + args: args, + resp: resp, + } + future.init() + + // Add a send timeout + if timeout := n.trans.timeout; timeout > 0 { + n.conn.conn.SetWriteDeadline(time.Now().Add(timeout)) + } + + // Send the RPC + if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil { + return nil, err + } + + // Hand-off for decoding, this can also cause back-pressure + // to prevent too many inflight requests + select { + case n.inprogressCh <- future: + return future, nil + case <-n.shutdownCh: + return nil, ErrPipelineShutdown + } +} + +// Consumer returns a channel that can be used to consume complete futures. +func (n *netPipeline) Consumer() <-chan AppendFuture { + return n.doneCh +} + +// Closed is used to shutdown the pipeline connection. +func (n *netPipeline) Close() error { + n.shutdownLock.Lock() + defer n.shutdownLock.Unlock() + if n.shutdown { + return nil + } + + // Release the connection + n.conn.Release() + + n.shutdown = true + close(n.shutdownCh) + return nil +} diff --git a/vendor/github.com/hashicorp/raft/observer.go b/vendor/github.com/hashicorp/raft/observer.go new file mode 100644 index 0000000..bce17ef --- /dev/null +++ b/vendor/github.com/hashicorp/raft/observer.go @@ -0,0 +1,122 @@ +package raft + +import ( + "sync/atomic" +) + +// Observation is sent along the given channel to observers when an event occurs. +type Observation struct { + // Raft holds the Raft instance generating the observation. + Raft *Raft + // Data holds observation-specific data. Possible types are + // *RequestVoteRequest and RaftState. + Data interface{} +} + +// LeaderObservation is used for the data when leadership changes. +type LeaderObservation struct { + leader ServerAddress +} + +// nextObserverId is used to provide a unique ID for each observer to aid in +// deregistration. +var nextObserverID uint64 + +// FilterFn is a function that can be registered in order to filter observations. +// The function reports whether the observation should be included - if +// it returns false, the observation will be filtered out. +type FilterFn func(o *Observation) bool + +// Observer describes what to do with a given observation. +type Observer struct { + // numObserved and numDropped are performance counters for this observer. + // 64 bit types must be 64 bit aligned to use with atomic operations on + // 32 bit platforms, so keep them at the top of the struct. + numObserved uint64 + numDropped uint64 + + // channel receives observations. + channel chan Observation + + // blocking, if true, will cause Raft to block when sending an observation + // to this observer. This should generally be set to false. + blocking bool + + // filter will be called to determine if an observation should be sent to + // the channel. + filter FilterFn + + // id is the ID of this observer in the Raft map. 
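+	// It is assigned from nextObserverID when the observer is created with
+	// NewObserver.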
+ id uint64 +} + +// NewObserver creates a new observer that can be registered +// to make observations on a Raft instance. Observations +// will be sent on the given channel if they satisfy the +// given filter. +// +// If blocking is true, the observer will block when it can't +// send on the channel, otherwise it may discard events. +func NewObserver(channel chan Observation, blocking bool, filter FilterFn) *Observer { + return &Observer{ + channel: channel, + blocking: blocking, + filter: filter, + id: atomic.AddUint64(&nextObserverID, 1), + } +} + +// GetNumObserved returns the number of observations. +func (or *Observer) GetNumObserved() uint64 { + return atomic.LoadUint64(&or.numObserved) +} + +// GetNumDropped returns the number of dropped observations due to blocking. +func (or *Observer) GetNumDropped() uint64 { + return atomic.LoadUint64(&or.numDropped) +} + +// RegisterObserver registers a new observer. +func (r *Raft) RegisterObserver(or *Observer) { + r.observersLock.Lock() + defer r.observersLock.Unlock() + r.observers[or.id] = or +} + +// DeregisterObserver deregisters an observer. +func (r *Raft) DeregisterObserver(or *Observer) { + r.observersLock.Lock() + defer r.observersLock.Unlock() + delete(r.observers, or.id) +} + +// observe sends an observation to every observer. +func (r *Raft) observe(o interface{}) { + // In general observers should not block. But in any case this isn't + // disastrous as we only hold a read lock, which merely prevents + // registration / deregistration of observers. + r.observersLock.RLock() + defer r.observersLock.RUnlock() + for _, or := range r.observers { + // It's wasteful to do this in the loop, but for the common case + // where there are no observers we won't create any objects. + ob := Observation{Raft: r, Data: o} + if or.filter != nil && !or.filter(&ob) { + continue + } + if or.channel == nil { + continue + } + if or.blocking { + or.channel <- ob + atomic.AddUint64(&or.numObserved, 1) + } else { + select { + case or.channel <- ob: + atomic.AddUint64(&or.numObserved, 1) + default: + atomic.AddUint64(&or.numDropped, 1) + } + } + } +} diff --git a/vendor/github.com/hashicorp/raft/peersjson.go b/vendor/github.com/hashicorp/raft/peersjson.go new file mode 100644 index 0000000..38ca2a8 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/peersjson.go @@ -0,0 +1,98 @@ +package raft + +import ( + "bytes" + "encoding/json" + "io/ioutil" +) + +// ReadPeersJSON consumes a legacy peers.json file in the format of the old JSON +// peer store and creates a new-style configuration structure. This can be used +// to migrate this data or perform manual recovery when running protocol versions +// that can interoperate with older, unversioned Raft servers. This should not be +// used once server IDs are in use, because the old peers.json file didn't have +// support for these, nor non-voter suffrage types. +func ReadPeersJSON(path string) (Configuration, error) { + // Read in the file. + buf, err := ioutil.ReadFile(path) + if err != nil { + return Configuration{}, err + } + + // Parse it as JSON. + var peers []string + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peers); err != nil { + return Configuration{}, err + } + + // Map it into the new-style configuration structure. We can only specify + // voter roles here, and the ID has to be the same as the address. 
+ var configuration Configuration + for _, peer := range peers { + server := Server{ + Suffrage: Voter, + ID: ServerID(peer), + Address: ServerAddress(peer), + } + configuration.Servers = append(configuration.Servers, server) + } + + // We should only ingest valid configurations. + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + return configuration, nil +} + +// configEntry is used when decoding a new-style peers.json. +type configEntry struct { + // ID is the ID of the server (a UUID, usually). + ID ServerID `json:"id"` + + // Address is the host:port of the server. + Address ServerAddress `json:"address"` + + // NonVoter controls the suffrage. We choose this sense so people + // can leave this out and get a Voter by default. + NonVoter bool `json:"non_voter"` +} + +// ReadConfigJSON reads a new-style peers.json and returns a configuration +// structure. This can be used to perform manual recovery when running protocol +// versions that use server IDs. +func ReadConfigJSON(path string) (Configuration, error) { + // Read in the file. + buf, err := ioutil.ReadFile(path) + if err != nil { + return Configuration{}, err + } + + // Parse it as JSON. + var peers []configEntry + dec := json.NewDecoder(bytes.NewReader(buf)) + if err := dec.Decode(&peers); err != nil { + return Configuration{}, err + } + + // Map it into the new-style configuration structure. + var configuration Configuration + for _, peer := range peers { + suffrage := Voter + if peer.NonVoter { + suffrage = Nonvoter + } + server := Server{ + Suffrage: suffrage, + ID: peer.ID, + Address: peer.Address, + } + configuration.Servers = append(configuration.Servers, server) + } + + // We should only ingest valid configurations. + if err := checkConfiguration(configuration); err != nil { + return Configuration{}, err + } + return configuration, nil +} diff --git a/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/raft/raft.go new file mode 100644 index 0000000..395ecf7 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/raft.go @@ -0,0 +1,1470 @@ +package raft + +import ( + "bytes" + "container/list" + "fmt" + "io" + "io/ioutil" + "time" + + "github.com/armon/go-metrics" +) + +const ( + minCheckInterval = 10 * time.Millisecond +) + +var ( + keyCurrentTerm = []byte("CurrentTerm") + keyLastVoteTerm = []byte("LastVoteTerm") + keyLastVoteCand = []byte("LastVoteCand") +) + +// getRPCHeader returns an initialized RPCHeader struct for the given +// Raft instance. This structure is sent along with RPC requests and +// responses. +func (r *Raft) getRPCHeader() RPCHeader { + return RPCHeader{ + ProtocolVersion: r.conf.ProtocolVersion, + } +} + +// checkRPCHeader houses logic about whether this instance of Raft can process +// the given RPC message. +func (r *Raft) checkRPCHeader(rpc RPC) error { + // Get the header off the RPC message. + wh, ok := rpc.Command.(WithRPCHeader) + if !ok { + return fmt.Errorf("RPC does not have a header") + } + header := wh.GetRPCHeader() + + // First check is to just make sure the code can understand the + // protocol at all. + if header.ProtocolVersion < ProtocolVersionMin || + header.ProtocolVersion > ProtocolVersionMax { + return ErrUnsupportedProtocol + } + + // Second check is whether we should support this message, given the + // current protocol we are configured to run. This will drop support + // for protocol version 0 starting at protocol version 2, which is + // currently what we want, and in general support one version back. 
We + // may need to revisit this policy depending on how future protocol + // changes evolve. + if header.ProtocolVersion < r.conf.ProtocolVersion-1 { + return ErrUnsupportedProtocol + } + + return nil +} + +// getSnapshotVersion returns the snapshot version that should be used when +// creating snapshots, given the protocol version in use. +func getSnapshotVersion(protocolVersion ProtocolVersion) SnapshotVersion { + // Right now we only have two versions and they are backwards compatible + // so we don't need to look at the protocol version. + return 1 +} + +// commitTuple is used to send an index that was committed, +// with an optional associated future that should be invoked. +type commitTuple struct { + log *Log + future *logFuture +} + +// leaderState is state that is used while we are a leader. +type leaderState struct { + commitCh chan struct{} + commitment *commitment + inflight *list.List // list of logFuture in log index order + replState map[ServerID]*followerReplication + notify map[*verifyFuture]struct{} + stepDown chan struct{} +} + +// setLeader is used to modify the current leader of the cluster +func (r *Raft) setLeader(leader ServerAddress) { + r.leaderLock.Lock() + oldLeader := r.leader + r.leader = leader + r.leaderLock.Unlock() + if oldLeader != leader { + r.observe(LeaderObservation{leader: leader}) + } +} + +// requestConfigChange is a helper for the above functions that make +// configuration change requests. 'req' describes the change. For timeout, +// see AddVoter. +func (r *Raft) requestConfigChange(req configurationChangeRequest, timeout time.Duration) IndexFuture { + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + future := &configurationChangeFuture{ + req: req, + } + future.init() + select { + case <-timer: + return errorFuture{ErrEnqueueTimeout} + case r.configurationChangeCh <- future: + return future + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + } +} + +// run is a long running goroutine that runs the Raft FSM. +func (r *Raft) run() { + for { + // Check if we are doing a shutdown + select { + case <-r.shutdownCh: + // Clear the leader to prevent forwarding + r.setLeader("") + return + default: + } + + // Enter into a sub-FSM + switch r.getState() { + case Follower: + r.runFollower() + case Candidate: + r.runCandidate() + case Leader: + r.runLeader() + } + } +} + +// runFollower runs the FSM for a follower. 
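+// It returns when the heartbeat timeout elapses without leader contact and an
+// election can be started (transition to candidate), or when Raft shuts down.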
+func (r *Raft) runFollower() { + didWarn := false + r.logger.Printf("[INFO] raft: %v entering Follower state (Leader: %q)", r, r.Leader()) + metrics.IncrCounter([]string{"raft", "state", "follower"}, 1) + heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout) + for { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case c := <-r.configurationChangeCh: + // Reject any operations since we are not the leader + c.respond(ErrNotLeader) + + case a := <-r.applyCh: + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case r := <-r.userRestoreCh: + // Reject any restores since we are not the leader + r.respond(ErrNotLeader) + + case c := <-r.configurationsCh: + c.configurations = r.configurations.Clone() + c.respond(nil) + + case b := <-r.bootstrapCh: + b.respond(r.liveBootstrap(b.configuration)) + + case <-heartbeatTimer: + // Restart the heartbeat timer + heartbeatTimer = randomTimeout(r.conf.HeartbeatTimeout) + + // Check if we have had a successful contact + lastContact := r.LastContact() + if time.Now().Sub(lastContact) < r.conf.HeartbeatTimeout { + continue + } + + // Heartbeat failed! Transition to the candidate state + lastLeader := r.Leader() + r.setLeader("") + + if r.configurations.latestIndex == 0 { + if !didWarn { + r.logger.Printf("[WARN] raft: no known peers, aborting election") + didWarn = true + } + } else if r.configurations.latestIndex == r.configurations.committedIndex && + !hasVote(r.configurations.latest, r.localID) { + if !didWarn { + r.logger.Printf("[WARN] raft: not part of stable configuration, aborting election") + didWarn = true + } + } else { + r.logger.Printf(`[WARN] raft: Heartbeat timeout from %q reached, starting election`, lastLeader) + metrics.IncrCounter([]string{"raft", "transition", "heartbeat_timeout"}, 1) + r.setState(Candidate) + return + } + + case <-r.shutdownCh: + return + } + } +} + +// liveBootstrap attempts to seed an initial configuration for the cluster. See +// the Raft object's member BootstrapCluster for more details. This must only be +// called on the main thread, and only makes sense in the follower state. +func (r *Raft) liveBootstrap(configuration Configuration) error { + // Use the pre-init API to make the static updates. + err := BootstrapCluster(&r.conf, r.logs, r.stable, r.snapshots, + r.trans, configuration) + if err != nil { + return err + } + + // Make the configuration live. + var entry Log + if err := r.logs.GetLog(1, &entry); err != nil { + panic(err) + } + r.setCurrentTerm(1) + r.setLastLog(entry.Index, entry.Term) + r.processConfigurationLogEntry(&entry) + return nil +} + +// runCandidate runs the FSM for a candidate. 
+func (r *Raft) runCandidate() { + r.logger.Printf("[INFO] raft: %v entering Candidate state in term %v", + r, r.getCurrentTerm()+1) + metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1) + + // Start vote for us, and set a timeout + voteCh := r.electSelf() + electionTimer := randomTimeout(r.conf.ElectionTimeout) + + // Tally the votes, need a simple majority + grantedVotes := 0 + votesNeeded := r.quorumSize() + r.logger.Printf("[DEBUG] raft: Votes needed: %d", votesNeeded) + + for r.getState() == Candidate { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case vote := <-voteCh: + // Check if the term is greater than ours, bail + if vote.Term > r.getCurrentTerm() { + r.logger.Printf("[DEBUG] raft: Newer term discovered, fallback to follower") + r.setState(Follower) + r.setCurrentTerm(vote.Term) + return + } + + // Check if the vote is granted + if vote.Granted { + grantedVotes++ + r.logger.Printf("[DEBUG] raft: Vote granted from %s in term %v. Tally: %d", + vote.voterID, vote.Term, grantedVotes) + } + + // Check if we've become the leader + if grantedVotes >= votesNeeded { + r.logger.Printf("[INFO] raft: Election won. Tally: %d", grantedVotes) + r.setState(Leader) + r.setLeader(r.localAddr) + return + } + + case c := <-r.configurationChangeCh: + // Reject any operations since we are not the leader + c.respond(ErrNotLeader) + + case a := <-r.applyCh: + // Reject any operations since we are not the leader + a.respond(ErrNotLeader) + + case v := <-r.verifyCh: + // Reject any operations since we are not the leader + v.respond(ErrNotLeader) + + case r := <-r.userRestoreCh: + // Reject any restores since we are not the leader + r.respond(ErrNotLeader) + + case c := <-r.configurationsCh: + c.configurations = r.configurations.Clone() + c.respond(nil) + + case b := <-r.bootstrapCh: + b.respond(ErrCantBootstrap) + + case <-electionTimer: + // Election failed! Restart the election. We simply return, + // which will kick us back into runCandidate + r.logger.Printf("[WARN] raft: Election timeout reached, restarting election") + return + + case <-r.shutdownCh: + return + } + } +} + +// runLeader runs the FSM for a leader. Do the setup here and drop into +// the leaderLoop for the hot loop. +func (r *Raft) runLeader() { + r.logger.Printf("[INFO] raft: %v entering Leader state", r) + metrics.IncrCounter([]string{"raft", "state", "leader"}, 1) + + // Notify that we are the leader + asyncNotifyBool(r.leaderCh, true) + + // Push to the notify channel if given + if notify := r.conf.NotifyCh; notify != nil { + select { + case notify <- true: + case <-r.shutdownCh: + } + } + + // Setup leader state + r.leaderState.commitCh = make(chan struct{}, 1) + r.leaderState.commitment = newCommitment(r.leaderState.commitCh, + r.configurations.latest, + r.getLastIndex()+1 /* first index that may be committed in this term */) + r.leaderState.inflight = list.New() + r.leaderState.replState = make(map[ServerID]*followerReplication) + r.leaderState.notify = make(map[*verifyFuture]struct{}) + r.leaderState.stepDown = make(chan struct{}, 1) + + // Cleanup state on step down + defer func() { + // Since we were the leader previously, we update our + // last contact time when we step down, so that we are not + // reporting a last contact time from before we were the + // leader. Otherwise, to a client it would seem our data + // is extremely stale. 
+ r.setLastContact() + + // Stop replication + for _, p := range r.leaderState.replState { + close(p.stopCh) + } + + // Respond to all inflight operations + for e := r.leaderState.inflight.Front(); e != nil; e = e.Next() { + e.Value.(*logFuture).respond(ErrLeadershipLost) + } + + // Respond to any pending verify requests + for future := range r.leaderState.notify { + future.respond(ErrLeadershipLost) + } + + // Clear all the state + r.leaderState.commitCh = nil + r.leaderState.commitment = nil + r.leaderState.inflight = nil + r.leaderState.replState = nil + r.leaderState.notify = nil + r.leaderState.stepDown = nil + + // If we are stepping down for some reason, no known leader. + // We may have stepped down due to an RPC call, which would + // provide the leader, so we cannot always blank this out. + r.leaderLock.Lock() + if r.leader == r.localAddr { + r.leader = "" + } + r.leaderLock.Unlock() + + // Notify that we are not the leader + asyncNotifyBool(r.leaderCh, false) + + // Push to the notify channel if given + if notify := r.conf.NotifyCh; notify != nil { + select { + case notify <- false: + case <-r.shutdownCh: + // On shutdown, make a best effort but do not block + select { + case notify <- false: + default: + } + } + } + }() + + // Start a replication routine for each peer + r.startStopReplication() + + // Dispatch a no-op log entry first. This gets this leader up to the latest + // possible commit index, even in the absence of client commands. This used + // to append a configuration entry instead of a noop. However, that permits + // an unbounded number of uncommitted configurations in the log. We now + // maintain that there exists at most one uncommitted configuration entry in + // any log, so we have to do proper no-ops here. + noop := &logFuture{ + log: Log{ + Type: LogNoop, + }, + } + r.dispatchLogs([]*logFuture{noop}) + + // Sit in the leader loop until we step down + r.leaderLoop() +} + +// startStopReplication will set up state and start asynchronous replication to +// new peers, and stop replication to removed peers. Before removing a peer, +// it'll instruct the replication routines to try to replicate to the current +// index. This must only be called from the main thread. 
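+// It is called when entering the leader state and again from
+// appendConfigurationEntry whenever the latest configuration changes.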
+func (r *Raft) startStopReplication() { + inConfig := make(map[ServerID]bool, len(r.configurations.latest.Servers)) + lastIdx := r.getLastIndex() + + // Start replication goroutines that need starting + for _, server := range r.configurations.latest.Servers { + if server.ID == r.localID { + continue + } + inConfig[server.ID] = true + if _, ok := r.leaderState.replState[server.ID]; !ok { + r.logger.Printf("[INFO] raft: Added peer %v, starting replication", server.ID) + s := &followerReplication{ + peer: server, + commitment: r.leaderState.commitment, + stopCh: make(chan uint64, 1), + triggerCh: make(chan struct{}, 1), + currentTerm: r.getCurrentTerm(), + nextIndex: lastIdx + 1, + lastContact: time.Now(), + notify: make(map[*verifyFuture]struct{}), + notifyCh: make(chan struct{}, 1), + stepDown: r.leaderState.stepDown, + } + r.leaderState.replState[server.ID] = s + r.goFunc(func() { r.replicate(s) }) + asyncNotifyCh(s.triggerCh) + } + } + + // Stop replication goroutines that need stopping + for serverID, repl := range r.leaderState.replState { + if inConfig[serverID] { + continue + } + // Replicate up to lastIdx and stop + r.logger.Printf("[INFO] raft: Removed peer %v, stopping replication after %v", serverID, lastIdx) + repl.stopCh <- lastIdx + close(repl.stopCh) + delete(r.leaderState.replState, serverID) + } +} + +// configurationChangeChIfStable returns r.configurationChangeCh if it's safe +// to process requests from it, or nil otherwise. This must only be called +// from the main thread. +// +// Note that if the conditions here were to change outside of leaderLoop to take +// this from nil to non-nil, we would need leaderLoop to be kicked. +func (r *Raft) configurationChangeChIfStable() chan *configurationChangeFuture { + // Have to wait until: + // 1. The latest configuration is committed, and + // 2. This leader has committed some entry (the noop) in this term + // https://groups.google.com/forum/#!msg/raft-dev/t4xj6dJTP6E/d2D9LrWRza8J + if r.configurations.latestIndex == r.configurations.committedIndex && + r.getCommitIndex() >= r.leaderState.commitment.startIndex { + return r.configurationChangeCh + } + return nil +} + +// leaderLoop is the hot loop for a leader. It is invoked +// after all the various leader setup is done. +func (r *Raft) leaderLoop() { + // stepDown is used to track if there is an inflight log that + // would cause us to lose leadership (specifically a RemovePeer of + // ourselves). If this is the case, we must not allow any logs to + // be processed in parallel, otherwise we are basing commit on + // only a single peer (ourself) and replicating to an undefined set + // of peers. 
+ stepDown := false + + lease := time.After(r.conf.LeaderLeaseTimeout) + for r.getState() == Leader { + select { + case rpc := <-r.rpcCh: + r.processRPC(rpc) + + case <-r.leaderState.stepDown: + r.setState(Follower) + + case <-r.leaderState.commitCh: + // Process the newly committed entries + oldCommitIndex := r.getCommitIndex() + commitIndex := r.leaderState.commitment.getCommitIndex() + r.setCommitIndex(commitIndex) + + if r.configurations.latestIndex > oldCommitIndex && + r.configurations.latestIndex <= commitIndex { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + if !hasVote(r.configurations.committed, r.localID) { + stepDown = true + } + } + + for { + e := r.leaderState.inflight.Front() + if e == nil { + break + } + commitLog := e.Value.(*logFuture) + idx := commitLog.log.Index + if idx > commitIndex { + break + } + // Measure the commit time + metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch) + r.processLogs(idx, commitLog) + r.leaderState.inflight.Remove(e) + } + + if stepDown { + if r.conf.ShutdownOnRemove { + r.logger.Printf("[INFO] raft: Removed ourself, shutting down") + r.Shutdown() + } else { + r.logger.Printf("[INFO] raft: Removed ourself, transitioning to follower") + r.setState(Follower) + } + } + + case v := <-r.verifyCh: + if v.quorumSize == 0 { + // Just dispatched, start the verification + r.verifyLeader(v) + + } else if v.votes < v.quorumSize { + // Early return, means there must be a new leader + r.logger.Printf("[WARN] raft: New leader elected, stepping down") + r.setState(Follower) + delete(r.leaderState.notify, v) + for _, repl := range r.leaderState.replState { + repl.cleanNotify(v) + } + v.respond(ErrNotLeader) + + } else { + // Quorum of members agree, we are still leader + delete(r.leaderState.notify, v) + for _, repl := range r.leaderState.replState { + repl.cleanNotify(v) + } + v.respond(nil) + } + + case future := <-r.userRestoreCh: + err := r.restoreUserSnapshot(future.meta, future.reader) + future.respond(err) + + case c := <-r.configurationsCh: + c.configurations = r.configurations.Clone() + c.respond(nil) + + case future := <-r.configurationChangeChIfStable(): + r.appendConfigurationEntry(future) + + case b := <-r.bootstrapCh: + b.respond(ErrCantBootstrap) + + case newLog := <-r.applyCh: + // Group commit, gather all the ready commits + ready := []*logFuture{newLog} + for i := 0; i < r.conf.MaxAppendEntries; i++ { + select { + case newLog := <-r.applyCh: + ready = append(ready, newLog) + default: + break + } + } + + // Dispatch the logs + if stepDown { + // we're in the process of stepping down as leader, don't process anything new + for i := range ready { + ready[i].respond(ErrNotLeader) + } + } else { + r.dispatchLogs(ready) + } + + case <-lease: + // Check if we've exceeded the lease, potentially stepping down + maxDiff := r.checkLeaderLease() + + // Next check interval should adjust for the last node we've + // contacted, without going negative + checkInterval := r.conf.LeaderLeaseTimeout - maxDiff + if checkInterval < minCheckInterval { + checkInterval = minCheckInterval + } + + // Renew the lease timer + lease = time.After(checkInterval) + + case <-r.shutdownCh: + return + } + } +} + +// verifyLeader must be called from the main thread for safety. +// Causes the followers to attempt an immediate heartbeat. 
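+// Results are reported back on verifyCh and tallied in leaderLoop's verifyCh
+// case, which responds to the future.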
+func (r *Raft) verifyLeader(v *verifyFuture) { + // Current leader always votes for self + v.votes = 1 + + // Set the quorum size, hot-path for single node + v.quorumSize = r.quorumSize() + if v.quorumSize == 1 { + v.respond(nil) + return + } + + // Track this request + v.notifyCh = r.verifyCh + r.leaderState.notify[v] = struct{}{} + + // Trigger immediate heartbeats + for _, repl := range r.leaderState.replState { + repl.notifyLock.Lock() + repl.notify[v] = struct{}{} + repl.notifyLock.Unlock() + asyncNotifyCh(repl.notifyCh) + } +} + +// checkLeaderLease is used to check if we can contact a quorum of nodes +// within the last leader lease interval. If not, we need to step down, +// as we may have lost connectivity. Returns the maximum duration without +// contact. This must only be called from the main thread. +func (r *Raft) checkLeaderLease() time.Duration { + // Track contacted nodes, we can always contact ourself + contacted := 1 + + // Check each follower + var maxDiff time.Duration + now := time.Now() + for peer, f := range r.leaderState.replState { + diff := now.Sub(f.LastContact()) + if diff <= r.conf.LeaderLeaseTimeout { + contacted++ + if diff > maxDiff { + maxDiff = diff + } + } else { + // Log at least once at high value, then debug. Otherwise it gets very verbose. + if diff <= 3*r.conf.LeaderLeaseTimeout { + r.logger.Printf("[WARN] raft: Failed to contact %v in %v", peer, diff) + } else { + r.logger.Printf("[DEBUG] raft: Failed to contact %v in %v", peer, diff) + } + } + metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond)) + } + + // Verify we can contact a quorum + quorum := r.quorumSize() + if contacted < quorum { + r.logger.Printf("[WARN] raft: Failed to contact quorum of nodes, stepping down") + r.setState(Follower) + metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1) + } + return maxDiff +} + +// quorumSize is used to return the quorum size. This must only be called on +// the main thread. +// TODO: revisit usage +func (r *Raft) quorumSize() int { + voters := 0 + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + voters++ + } + } + return voters/2 + 1 +} + +// restoreUserSnapshot is used to manually consume an external snapshot, such +// as if restoring from a backup. We will use the current Raft configuration, +// not the one from the snapshot, so that we can restore into a new cluster. We +// will also use the higher of the index of the snapshot, or the current index, +// and then add 1 to that, so we force a new state with a hole in the Raft log, +// so that the snapshot will be sent to followers and used for any new joiners. +// This can only be run on the leader, and returns a future that can be used to +// block until complete. +func (r *Raft) restoreUserSnapshot(meta *SnapshotMeta, reader io.Reader) error { + defer metrics.MeasureSince([]string{"raft", "restoreUserSnapshot"}, time.Now()) + + // Sanity check the version. + version := meta.Version + if version < SnapshotVersionMin || version > SnapshotVersionMax { + return fmt.Errorf("unsupported snapshot version %d", version) + } + + // We don't support snapshots while there's a config change + // outstanding since the snapshot doesn't have a means to + // represent this state. 
+ committedIndex := r.configurations.committedIndex + latestIndex := r.configurations.latestIndex + if committedIndex != latestIndex { + return fmt.Errorf("cannot restore snapshot now, wait until the configuration entry at %v has been applied (have applied %v)", + latestIndex, committedIndex) + } + + // Cancel any inflight requests. + for { + e := r.leaderState.inflight.Front() + if e == nil { + break + } + e.Value.(*logFuture).respond(ErrAbortedByRestore) + r.leaderState.inflight.Remove(e) + } + + // We will overwrite the snapshot metadata with the current term, + // an index that's greater than the current index, or the last + // index in the snapshot. It's important that we leave a hole in + // the index so we know there's nothing in the Raft log there and + // replication will fault and send the snapshot. + term := r.getCurrentTerm() + lastIndex := r.getLastIndex() + if meta.Index > lastIndex { + lastIndex = meta.Index + } + lastIndex++ + + // Dump the snapshot. Note that we use the latest configuration, + // not the one that came with the snapshot. + sink, err := r.snapshots.Create(version, lastIndex, term, + r.configurations.latest, r.configurations.latestIndex, r.trans) + if err != nil { + return fmt.Errorf("failed to create snapshot: %v", err) + } + n, err := io.Copy(sink, reader) + if err != nil { + sink.Cancel() + return fmt.Errorf("failed to write snapshot: %v", err) + } + if n != meta.Size { + sink.Cancel() + return fmt.Errorf("failed to write snapshot, size didn't match (%d != %d)", n, meta.Size) + } + if err := sink.Close(); err != nil { + return fmt.Errorf("failed to close snapshot: %v", err) + } + r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n) + + // Restore the snapshot into the FSM. If this fails we are in a + // bad state so we panic to take ourselves out. + fsm := &restoreFuture{ID: sink.ID()} + fsm.init() + select { + case r.fsmMutateCh <- fsm: + case <-r.shutdownCh: + return ErrRaftShutdown + } + if err := fsm.Error(); err != nil { + panic(fmt.Errorf("failed to restore snapshot: %v", err)) + } + + // We set the last log so it looks like we've stored the empty + // index we burned. The last applied is set because we made the + // FSM take the snapshot state, and we store the last snapshot + // in the stable store since we created a snapshot as part of + // this process. + r.setLastLog(lastIndex, term) + r.setLastApplied(lastIndex) + r.setLastSnapshot(lastIndex, term) + + r.logger.Printf("[INFO] raft: Restored user snapshot (index %d)", lastIndex) + return nil +} + +// appendConfigurationEntry changes the configuration and adds a new +// configuration entry to the log. This must only be called from the +// main thread. +func (r *Raft) appendConfigurationEntry(future *configurationChangeFuture) { + configuration, err := nextConfiguration(r.configurations.latest, r.configurations.latestIndex, future.req) + if err != nil { + future.respond(err) + return + } + + r.logger.Printf("[INFO] raft: Updating configuration with %s (%v, %v) to %+v", + future.req.command, future.req.serverID, future.req.serverAddress, configuration.Servers) + + // In pre-ID compatibility mode we translate all configuration changes + // in to an old remove peer message, which can handle all supported + // cases for peer changes in the pre-ID world (adding and removing + // voters). Both add peer and remove peer log entries are handled + // similarly on old Raft servers, but remove peer does extra checks to + // see if a leader needs to step down. 
Since they both assert the full + // configuration, then we can safely call remove peer for everything. + if r.protocolVersion < 2 { + future.log = Log{ + Type: LogRemovePeerDeprecated, + Data: encodePeers(configuration, r.trans), + } + } else { + future.log = Log{ + Type: LogConfiguration, + Data: encodeConfiguration(configuration), + } + } + + r.dispatchLogs([]*logFuture{&future.logFuture}) + index := future.Index() + r.configurations.latest = configuration + r.configurations.latestIndex = index + r.leaderState.commitment.setConfiguration(configuration) + r.startStopReplication() +} + +// dispatchLog is called on the leader to push a log to disk, mark it +// as inflight and begin replication of it. +func (r *Raft) dispatchLogs(applyLogs []*logFuture) { + now := time.Now() + defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now) + + term := r.getCurrentTerm() + lastIndex := r.getLastIndex() + logs := make([]*Log, len(applyLogs)) + + for idx, applyLog := range applyLogs { + applyLog.dispatch = now + lastIndex++ + applyLog.log.Index = lastIndex + applyLog.log.Term = term + logs[idx] = &applyLog.log + r.leaderState.inflight.PushBack(applyLog) + } + + // Write the log entry locally + if err := r.logs.StoreLogs(logs); err != nil { + r.logger.Printf("[ERR] raft: Failed to commit logs: %v", err) + for _, applyLog := range applyLogs { + applyLog.respond(err) + } + r.setState(Follower) + return + } + r.leaderState.commitment.match(r.localID, lastIndex) + + // Update the last log since it's on disk now + r.setLastLog(lastIndex, term) + + // Notify the replicators of the new log + for _, f := range r.leaderState.replState { + asyncNotifyCh(f.triggerCh) + } +} + +// processLogs is used to apply all the committed entires that haven't been +// applied up to the given index limit. +// This can be called from both leaders and followers. +// Followers call this from AppendEntires, for n entires at a time, and always +// pass future=nil. +// Leaders call this once per inflight when entries are committed. They pass +// the future from inflights. +func (r *Raft) processLogs(index uint64, future *logFuture) { + // Reject logs we've applied already + lastApplied := r.getLastApplied() + if index <= lastApplied { + r.logger.Printf("[WARN] raft: Skipping application of old log: %d", index) + return + } + + // Apply all the preceding logs + for idx := r.getLastApplied() + 1; idx <= index; idx++ { + // Get the log, either from the future or from our log store + if future != nil && future.log.Index == idx { + r.processLog(&future.log, future) + + } else { + l := new(Log) + if err := r.logs.GetLog(idx, l); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", idx, err) + panic(err) + } + r.processLog(l, nil) + } + + // Update the lastApplied index and term + r.setLastApplied(idx) + } +} + +// processLog is invoked to process the application of a single committed log entry. 
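+// Command and barrier entries are handed to the FSM goroutine, which responds
+// to the future once applied; configuration, deprecated peer-change, and
+// no-op entries are acknowledged here directly.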
+func (r *Raft) processLog(l *Log, future *logFuture) { + switch l.Type { + case LogBarrier: + // Barrier is handled by the FSM + fallthrough + + case LogCommand: + // Forward to the fsm handler + select { + case r.fsmMutateCh <- &commitTuple{l, future}: + case <-r.shutdownCh: + if future != nil { + future.respond(ErrRaftShutdown) + } + } + + // Return so that the future is only responded to + // by the FSM handler when the application is done + return + + case LogConfiguration: + case LogAddPeerDeprecated: + case LogRemovePeerDeprecated: + case LogNoop: + // Ignore the no-op + + default: + panic(fmt.Errorf("unrecognized log type: %#v", l)) + } + + // Invoke the future if given + if future != nil { + future.respond(nil) + } +} + +// processRPC is called to handle an incoming RPC request. This must only be +// called from the main thread. +func (r *Raft) processRPC(rpc RPC) { + if err := r.checkRPCHeader(rpc); err != nil { + rpc.Respond(nil, err) + return + } + + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + case *RequestVoteRequest: + r.requestVote(rpc, cmd) + case *InstallSnapshotRequest: + r.installSnapshot(rpc, cmd) + default: + r.logger.Printf("[ERR] raft: Got unexpected command: %#v", rpc.Command) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// processHeartbeat is a special handler used just for heartbeat requests +// so that they can be fast-pathed if a transport supports it. This must only +// be called from the main thread. +func (r *Raft) processHeartbeat(rpc RPC) { + defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now()) + + // Check if we are shutdown, just ignore the RPC + select { + case <-r.shutdownCh: + return + default: + } + + // Ensure we are only handling a heartbeat + switch cmd := rpc.Command.(type) { + case *AppendEntriesRequest: + r.appendEntries(rpc, cmd) + default: + r.logger.Printf("[ERR] raft: Expected heartbeat, got command: %#v", rpc.Command) + rpc.Respond(nil, fmt.Errorf("unexpected command")) + } +} + +// appendEntries is invoked when we get an append entries RPC call. This must +// only be called from the main thread. 
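+// Heartbeats take this path too (via processHeartbeat), since a heartbeat is
+// just an AppendEntries request with no entries.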
+func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now()) + // Setup a response + resp := &AppendEntriesResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + LastLog: r.getLastIndex(), + Success: false, + NoRetryBackoff: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Ignore an older term + if a.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one, also transition to follower + // if we ever get an appendEntries call + if a.Term > r.getCurrentTerm() || r.getState() != Follower { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(a.Term) + resp.Term = a.Term + } + + // Save the current leader + r.setLeader(ServerAddress(r.trans.DecodePeer(a.Leader))) + + // Verify the last log entry + if a.PrevLogEntry > 0 { + lastIdx, lastTerm := r.getLastEntry() + + var prevLogTerm uint64 + if a.PrevLogEntry == lastIdx { + prevLogTerm = lastTerm + + } else { + var prevLog Log + if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil { + r.logger.Printf("[WARN] raft: Failed to get previous log: %d %v (last: %d)", + a.PrevLogEntry, err, lastIdx) + resp.NoRetryBackoff = true + return + } + prevLogTerm = prevLog.Term + } + + if a.PrevLogTerm != prevLogTerm { + r.logger.Printf("[WARN] raft: Previous log term mis-match: ours: %d remote: %d", + prevLogTerm, a.PrevLogTerm) + resp.NoRetryBackoff = true + return + } + } + + // Process any new entries + if len(a.Entries) > 0 { + start := time.Now() + + // Delete any conflicting entries, skip any duplicates + lastLogIdx, _ := r.getLastLog() + var newEntries []*Log + for i, entry := range a.Entries { + if entry.Index > lastLogIdx { + newEntries = a.Entries[i:] + break + } + var storeEntry Log + if err := r.logs.GetLog(entry.Index, &storeEntry); err != nil { + r.logger.Printf("[WARN] raft: Failed to get log entry %d: %v", + entry.Index, err) + return + } + if entry.Term != storeEntry.Term { + r.logger.Printf("[WARN] raft: Clearing log suffix from %d to %d", entry.Index, lastLogIdx) + if err := r.logs.DeleteRange(entry.Index, lastLogIdx); err != nil { + r.logger.Printf("[ERR] raft: Failed to clear log suffix: %v", err) + return + } + if entry.Index <= r.configurations.latestIndex { + r.configurations.latest = r.configurations.committed + r.configurations.latestIndex = r.configurations.committedIndex + } + newEntries = a.Entries[i:] + break + } + } + + if n := len(newEntries); n > 0 { + // Append the new entries + if err := r.logs.StoreLogs(newEntries); err != nil { + r.logger.Printf("[ERR] raft: Failed to append to logs: %v", err) + // TODO: leaving r.getLastLog() in the wrong + // state if there was a truncation above + return + } + + // Handle any new configuration changes + for _, newEntry := range newEntries { + r.processConfigurationLogEntry(newEntry) + } + + // Update the lastLog + last := newEntries[n-1] + r.setLastLog(last.Index, last.Term) + } + + metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start) + } + + // Update the commit index + if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() { + start := time.Now() + idx := min(a.LeaderCommitIndex, r.getLastIndex()) + r.setCommitIndex(idx) + if r.configurations.latestIndex <= idx { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + } + r.processLogs(idx, nil) + 
metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start) + } + + // Everything went well, set success + resp.Success = true + r.setLastContact() + return +} + +// processConfigurationLogEntry takes a log entry and updates the latest +// configuration if the entry results in a new configuration. This must only be +// called from the main thread, or from NewRaft() before any threads have begun. +func (r *Raft) processConfigurationLogEntry(entry *Log) { + if entry.Type == LogConfiguration { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + r.configurations.latest = decodeConfiguration(entry.Data) + r.configurations.latestIndex = entry.Index + } else if entry.Type == LogAddPeerDeprecated || entry.Type == LogRemovePeerDeprecated { + r.configurations.committed = r.configurations.latest + r.configurations.committedIndex = r.configurations.latestIndex + r.configurations.latest = decodePeers(entry.Data, r.trans) + r.configurations.latestIndex = entry.Index + } +} + +// requestVote is invoked when we get an request vote RPC call. +func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now()) + r.observe(*req) + + // Setup a response + resp := &RequestVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + Granted: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Version 0 servers will panic unless the peers is present. It's only + // used on them to produce a warning message. + if r.protocolVersion < 2 { + resp.Peers = encodePeers(r.configurations.latest, r.trans) + } + + // Check if we have an existing leader [who's not the candidate] + candidate := r.trans.DecodePeer(req.Candidate) + if leader := r.Leader(); leader != "" && leader != candidate { + r.logger.Printf("[WARN] raft: Rejecting vote request from %v since we have a leader: %v", + candidate, leader) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Check if we have voted yet + lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm) + if err != nil && err.Error() != "not found" { + r.logger.Printf("[ERR] raft: Failed to get last vote term: %v", err) + return + } + lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand) + if err != nil && err.Error() != "not found" { + r.logger.Printf("[ERR] raft: Failed to get last vote candidate: %v", err) + return + } + + // Check if we've voted in this election before + if lastVoteTerm == req.Term && lastVoteCandBytes != nil { + r.logger.Printf("[INFO] raft: Duplicate RequestVote for same term: %d", req.Term) + if bytes.Compare(lastVoteCandBytes, req.Candidate) == 0 { + r.logger.Printf("[WARN] raft: Duplicate RequestVote from candidate: %s", req.Candidate) + resp.Granted = true + } + return + } + + // Reject if their term is older + lastIdx, lastTerm := r.getLastEntry() + if lastTerm > req.LastLogTerm { + r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last term is greater (%d, %d)", + candidate, lastTerm, req.LastLogTerm) + return + } + + if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex { + r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last index is greater (%d, %d)", 
+ candidate, lastIdx, req.LastLogIndex) + return + } + + // Persist a vote for safety + if err := r.persistVote(req.Term, req.Candidate); err != nil { + r.logger.Printf("[ERR] raft: Failed to persist vote: %v", err) + return + } + + resp.Granted = true + r.setLastContact() + return +} + +// installSnapshot is invoked when we get a InstallSnapshot RPC call. +// We must be in the follower state for this, since it means we are +// too far behind a leader for log replay. This must only be called +// from the main thread. +func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now()) + // Setup a response + resp := &InstallSnapshotResponse{ + Term: r.getCurrentTerm(), + Success: false, + } + var rpcErr error + defer func() { + io.Copy(ioutil.Discard, rpc.Reader) // ensure we always consume all the snapshot data from the stream [see issue #212] + rpc.Respond(resp, rpcErr) + }() + + // Sanity check the version + if req.SnapshotVersion < SnapshotVersionMin || + req.SnapshotVersion > SnapshotVersionMax { + rpcErr = fmt.Errorf("unsupported snapshot version %d", req.SnapshotVersion) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + r.logger.Printf("[INFO] raft: Ignoring installSnapshot request with older term of %d vs currentTerm %d", req.Term, r.getCurrentTerm()) + return + } + + // Increase the term if we see a newer one + if req.Term > r.getCurrentTerm() { + // Ensure transition to follower + r.setState(Follower) + r.setCurrentTerm(req.Term) + resp.Term = req.Term + } + + // Save the current leader + r.setLeader(ServerAddress(r.trans.DecodePeer(req.Leader))) + + // Create a new snapshot + var reqConfiguration Configuration + var reqConfigurationIndex uint64 + if req.SnapshotVersion > 0 { + reqConfiguration = decodeConfiguration(req.Configuration) + reqConfigurationIndex = req.ConfigurationIndex + } else { + reqConfiguration = decodePeers(req.Peers, r.trans) + reqConfigurationIndex = req.LastLogIndex + } + version := getSnapshotVersion(r.protocolVersion) + sink, err := r.snapshots.Create(version, req.LastLogIndex, req.LastLogTerm, + reqConfiguration, reqConfigurationIndex, r.trans) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to create snapshot to install: %v", err) + rpcErr = fmt.Errorf("failed to create snapshot: %v", err) + return + } + + // Spill the remote snapshot to disk + n, err := io.Copy(sink, rpc.Reader) + if err != nil { + sink.Cancel() + r.logger.Printf("[ERR] raft: Failed to copy snapshot: %v", err) + rpcErr = err + return + } + + // Check that we received it all + if n != req.Size { + sink.Cancel() + r.logger.Printf("[ERR] raft: Failed to receive whole snapshot: %d / %d", n, req.Size) + rpcErr = fmt.Errorf("short read") + return + } + + // Finalize the snapshot + if err := sink.Close(); err != nil { + r.logger.Printf("[ERR] raft: Failed to finalize snapshot: %v", err) + rpcErr = err + return + } + r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n) + + // Restore snapshot + future := &restoreFuture{ID: sink.ID()} + future.init() + select { + case r.fsmMutateCh <- future: + case <-r.shutdownCh: + future.respond(ErrRaftShutdown) + return + } + + // Wait for the restore to happen + if err := future.Error(); err != nil { + r.logger.Printf("[ERR] raft: Failed to restore snapshot: %v", err) + rpcErr = err + return + } + + // Update the lastApplied so we don't replay old logs + r.setLastApplied(req.LastLogIndex) + + // Update the last stable 
snapshot info + r.setLastSnapshot(req.LastLogIndex, req.LastLogTerm) + + // Restore the peer set + r.configurations.latest = reqConfiguration + r.configurations.latestIndex = reqConfigurationIndex + r.configurations.committed = reqConfiguration + r.configurations.committedIndex = reqConfigurationIndex + + // Compact logs, continue even if this fails + if err := r.compactLogs(req.LastLogIndex); err != nil { + r.logger.Printf("[ERR] raft: Failed to compact logs: %v", err) + } + + r.logger.Printf("[INFO] raft: Installed remote snapshot") + resp.Success = true + r.setLastContact() + return +} + +// setLastContact is used to set the last contact time to now +func (r *Raft) setLastContact() { + r.lastContactLock.Lock() + r.lastContact = time.Now() + r.lastContactLock.Unlock() +} + +type voteResult struct { + RequestVoteResponse + voterID ServerID +} + +// electSelf is used to send a RequestVote RPC to all peers, and vote for +// ourself. This has the side affecting of incrementing the current term. The +// response channel returned is used to wait for all the responses (including a +// vote for ourself). This must only be called from the main thread. +func (r *Raft) electSelf() <-chan *voteResult { + // Create a response channel + respCh := make(chan *voteResult, len(r.configurations.latest.Servers)) + + // Increment the term + r.setCurrentTerm(r.getCurrentTerm() + 1) + + // Construct the request + lastIdx, lastTerm := r.getLastEntry() + req := &RequestVoteRequest{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + Candidate: r.trans.EncodePeer(r.localID, r.localAddr), + LastLogIndex: lastIdx, + LastLogTerm: lastTerm, + } + + // Construct a function to ask for a vote + askPeer := func(peer Server) { + r.goFunc(func() { + defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now()) + resp := &voteResult{voterID: peer.ID} + err := r.trans.RequestVote(peer.ID, peer.Address, req, &resp.RequestVoteResponse) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err) + resp.Term = req.Term + resp.Granted = false + } + respCh <- resp + }) + } + + // For each peer, request a vote + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + if server.ID == r.localID { + // Persist a vote for ourselves + if err := r.persistVote(req.Term, req.Candidate); err != nil { + r.logger.Printf("[ERR] raft: Failed to persist vote : %v", err) + return nil + } + // Include our own vote + respCh <- &voteResult{ + RequestVoteResponse: RequestVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: req.Term, + Granted: true, + }, + voterID: r.localID, + } + } else { + askPeer(server) + } + } + } + + return respCh +} + +// persistVote is used to persist our vote for safety. +func (r *Raft) persistVote(term uint64, candidate []byte) error { + if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil { + return err + } + if err := r.stable.Set(keyLastVoteCand, candidate); err != nil { + return err + } + return nil +} + +// setCurrentTerm is used to set the current term in a durable manner. +func (r *Raft) setCurrentTerm(t uint64) { + // Persist to disk first + if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil { + panic(fmt.Errorf("failed to save current term: %v", err)) + } + r.raftState.setCurrentTerm(t) +} + +// setState is used to update the current state. Any state +// transition causes the known leader to be cleared. This means +// that leader should be set only after updating the state. 
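In practice that ordering constraint shows up wherever the node steps down: appendEntries and installSnapshot above call setState(Follower) and setCurrentTerm before setLeader. A purely illustrative helper (not part of the library) that spells out the required order:

// becomeFollowerOf is an illustrative wrapper: state first, then term, then
// leader, because setState wipes the known leader.
func (r *Raft) becomeFollowerOf(leader ServerAddress, term uint64) {
	r.setState(Follower)
	r.setCurrentTerm(term)
	r.setLeader(leader)
}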
+func (r *Raft) setState(state RaftState) { + r.setLeader("") + oldState := r.raftState.getState() + r.raftState.setState(state) + if oldState != state { + r.observe(state) + } +} diff --git a/vendor/github.com/hashicorp/raft/replication.go b/vendor/github.com/hashicorp/raft/replication.go new file mode 100644 index 0000000..1b23e84 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/replication.go @@ -0,0 +1,568 @@ +package raft + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +const ( + maxFailureScale = 12 + failureWait = 10 * time.Millisecond +) + +var ( + // ErrLogNotFound indicates a given log entry is not available. + ErrLogNotFound = errors.New("log not found") + + // ErrPipelineReplicationNotSupported can be returned by the transport to + // signal that pipeline replication is not supported in general, and that + // no error message should be produced. + ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported") +) + +// followerReplication is in charge of sending snapshots and log entries from +// this leader during this particular term to a remote follower. +type followerReplication struct { + // peer contains the network address and ID of the remote follower. + peer Server + + // commitment tracks the entries acknowledged by followers so that the + // leader's commit index can advance. It is updated on successful + // AppendEntries responses. + commitment *commitment + + // stopCh is notified/closed when this leader steps down or the follower is + // removed from the cluster. In the follower removed case, it carries a log + // index; replication should be attempted with a best effort up through that + // index, before exiting. + stopCh chan uint64 + // triggerCh is notified every time new entries are appended to the log. + triggerCh chan struct{} + + // currentTerm is the term of this leader, to be included in AppendEntries + // requests. + currentTerm uint64 + // nextIndex is the index of the next log entry to send to the follower, + // which may fall past the end of the log. + nextIndex uint64 + + // lastContact is updated to the current time whenever any response is + // received from the follower (successful or not). This is used to check + // whether the leader should step down (Raft.checkLeaderLease()). + lastContact time.Time + // lastContactLock protects 'lastContact'. + lastContactLock sync.RWMutex + + // failures counts the number of failed RPCs since the last success, which is + // used to apply backoff. + failures uint64 + + // notifyCh is notified to send out a heartbeat, which is used to check that + // this server is still leader. + notifyCh chan struct{} + // notify is a map of futures to be resolved upon receipt of an + // acknowledgement, then cleared from this map. + notify map[*verifyFuture]struct{} + // notifyLock protects 'notify'. + notifyLock sync.Mutex + + // stepDown is used to indicate to the leader that we + // should step down based on information from a follower. + stepDown chan struct{} + + // allowPipeline is used to determine when to pipeline the AppendEntries RPCs. + // It is private to this replication goroutine. + allowPipeline bool +} + +// notifyAll is used to notify all the waiting verify futures +// if the follower believes we are still the leader. 
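The stopCh convention described on the followerReplication struct above has two shapes: a sent value asks for a best-effort catch-up through that index, while a plain close asks for an immediate exit (the receiver then sees the zero value). A hedged sketch of both, assuming the replication routine is selecting on stopCh or the channel is buffered:

// Illustrative only; the real leader-side bookkeeping lives elsewhere in the library.
func requestGracefulStop(s *followerReplication, lastIndex uint64) {
	s.stopCh <- lastIndex // best-effort replicate through lastIndex, then exit
}

func requestImmediateStop(s *followerReplication) {
	close(s.stopCh) // receivers see the zero value and exit without a target
}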
+func (s *followerReplication) notifyAll(leader bool) {
+	// Clear the waiting notifies minimizing lock time
+	s.notifyLock.Lock()
+	n := s.notify
+	s.notify = make(map[*verifyFuture]struct{})
+	s.notifyLock.Unlock()
+
+	// Submit our votes
+	for v := range n {
+		v.vote(leader)
+	}
+}
+
+// cleanNotify is used to remove a pending verification future from the notify set.
+func (s *followerReplication) cleanNotify(v *verifyFuture) {
+	s.notifyLock.Lock()
+	delete(s.notify, v)
+	s.notifyLock.Unlock()
+}
+
+// LastContact returns the time of last contact.
+func (s *followerReplication) LastContact() time.Time {
+	s.lastContactLock.RLock()
+	last := s.lastContact
+	s.lastContactLock.RUnlock()
+	return last
+}
+
+// setLastContact sets the last contact to the current time.
+func (s *followerReplication) setLastContact() {
+	s.lastContactLock.Lock()
+	s.lastContact = time.Now()
+	s.lastContactLock.Unlock()
+}
+
+// replicate is a long running routine that replicates log entries to a single
+// follower.
+func (r *Raft) replicate(s *followerReplication) {
+	// Start an async heartbeating routine
+	stopHeartbeat := make(chan struct{})
+	defer close(stopHeartbeat)
+	r.goFunc(func() { r.heartbeat(s, stopHeartbeat) })
+
+RPC:
+	shouldStop := false
+	for !shouldStop {
+		select {
+		case maxIndex := <-s.stopCh:
+			// Make a best effort to replicate up to this index
+			if maxIndex > 0 {
+				r.replicateTo(s, maxIndex)
+			}
+			return
+		case <-s.triggerCh:
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+		case <-randomTimeout(r.conf.CommitTimeout):
+			// Periodic replication ensures followers learn the latest
+			// commit index promptly even when no new entries arrive
+			lastLogIdx, _ := r.getLastLog()
+			shouldStop = r.replicateTo(s, lastLogIdx)
+		}
+
+		// If things look healthy, switch to pipeline mode
+		if !shouldStop && s.allowPipeline {
+			goto PIPELINE
+		}
+	}
+	return
+
+PIPELINE:
+	// Disable until re-enabled
+	s.allowPipeline = false
+
+	// Replicates using a pipeline for high performance. This method
+	// is not able to gracefully recover from errors, and so we fall back
+	// to standard mode on failure.
+	if err := r.pipelineReplicate(s); err != nil {
+		if err != ErrPipelineReplicationNotSupported {
+			r.logger.Printf("[ERR] raft: Failed to start pipeline replication to %s: %s", s.peer, err)
+		}
+	}
+	goto RPC
+}
+
+// replicateTo is a helper to replicate(), used to replicate the logs up to a
+// given last index.
+// If the follower log is behind, we take care to bring them up to date.
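replicate above is written with goto labels; the control flow can be easier to follow as an ordinary loop. A purely illustrative restructuring (not the library's code), where runRPCMode stands for the RPC: section and runPipeline for the PIPELINE: section:

// replicateModes is a reading aid only: RPC mode runs until it either decides
// to stop or reports that pipelining looks safe; pipeline mode runs until it
// errors or is told to stop, then control falls back to RPC mode.
func replicateModes(runRPCMode func() (pipeline, stop bool), runPipeline func()) {
	for {
		pipeline, stop := runRPCMode()
		if stop {
			return
		}
		if pipeline {
			runPipeline()
		}
	}
}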
+func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) { + // Create the base request + var req AppendEntriesRequest + var resp AppendEntriesResponse + var start time.Time +START: + // Prevent an excessive retry rate on errors + if s.failures > 0 { + select { + case <-time.After(backoff(failureWait, s.failures, maxFailureScale)): + case <-r.shutdownCh: + } + } + + // Setup the request + if err := r.setupAppendEntries(s, &req, s.nextIndex, lastIndex); err == ErrLogNotFound { + goto SEND_SNAP + } else if err != nil { + return + } + + // Make the RPC call + start = time.Now() + if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil { + r.logger.Printf("[ERR] raft: Failed to AppendEntries to %v: %v", s.peer, err) + s.failures++ + return + } + appendStats(string(s.peer.ID), start, float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true + } + + // Update the last contact + s.setLastContact() + + // Update s based on success + if resp.Success { + // Update our replication state + updateLastAppended(s, &req) + + // Clear any failures, allow pipelining + s.failures = 0 + s.allowPipeline = true + } else { + s.nextIndex = max(min(s.nextIndex-1, resp.LastLog+1), 1) + if resp.NoRetryBackoff { + s.failures = 0 + } else { + s.failures++ + } + r.logger.Printf("[WARN] raft: AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, s.nextIndex) + } + +CHECK_MORE: + // Poll the stop channel here in case we are looping and have been asked + // to stop, or have stepped down as leader. Even for the best effort case + // where we are asked to replicate to a given index and then shutdown, + // it's better to not loop in here to send lots of entries to a straggler + // that's leaving the cluster anyways. + select { + case <-s.stopCh: + return true + default: + } + + // Check if there are more logs to replicate + if s.nextIndex <= lastIndex { + goto START + } + return + + // SEND_SNAP is used when we fail to get a log, usually because the follower + // is too far behind, and we must ship a snapshot down instead +SEND_SNAP: + if stop, err := r.sendLatestSnapshot(s); stop { + return true + } else if err != nil { + r.logger.Printf("[ERR] raft: Failed to send snapshot to %v: %v", s.peer, err) + return + } + + // Check if there is more to replicate + goto CHECK_MORE +} + +// sendLatestSnapshot is used to send the latest snapshot we have +// down to our follower. 
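When a follower rejects AppendEntries, replicateTo above moves nextIndex with max(min(nextIndex-1, resp.LastLog+1), 1). A worked example with made-up numbers may help; the helper below simply restates that expression using the package's min/max:

// nextIndexAfterReject is illustrative only. With nextIndex=50 and a follower
// whose LastLog=20, the leader jumps straight to 21 instead of walking back one
// entry per round trip; with a stale LastLog=60 it only steps back to 49; and
// the result never drops below 1.
func nextIndexAfterReject(nextIndex, followerLastLog uint64) uint64 {
	return max(min(nextIndex-1, followerLastLog+1), 1)
}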
+func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { + // Get the snapshots + snapshots, err := r.snapshots.List() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + return false, err + } + + // Check we have at least a single snapshot + if len(snapshots) == 0 { + return false, fmt.Errorf("no snapshots found") + } + + // Open the most recent snapshot + snapID := snapshots[0].ID + meta, snapshot, err := r.snapshots.Open(snapID) + if err != nil { + r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapID, err) + return false, err + } + defer snapshot.Close() + + // Setup the request + req := InstallSnapshotRequest{ + RPCHeader: r.getRPCHeader(), + SnapshotVersion: meta.Version, + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localID, r.localAddr), + LastLogIndex: meta.Index, + LastLogTerm: meta.Term, + Peers: meta.Peers, + Size: meta.Size, + Configuration: encodeConfiguration(meta.Configuration), + ConfigurationIndex: meta.ConfigurationIndex, + } + + // Make the call + start := time.Now() + var resp InstallSnapshotResponse + if err := r.trans.InstallSnapshot(s.peer.ID, s.peer.Address, &req, &resp, snapshot); err != nil { + r.logger.Printf("[ERR] raft: Failed to install snapshot %v: %v", snapID, err) + s.failures++ + return false, err + } + metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", string(s.peer.ID)}, start) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return true, nil + } + + // Update the last contact + s.setLastContact() + + // Check for success + if resp.Success { + // Update the indexes + s.nextIndex = meta.Index + 1 + s.commitment.match(s.peer.ID, meta.Index) + + // Clear any failures + s.failures = 0 + + // Notify we are still leader + s.notifyAll(true) + } else { + s.failures++ + r.logger.Printf("[WARN] raft: InstallSnapshot to %v rejected", s.peer) + } + return false, nil +} + +// heartbeat is used to periodically invoke AppendEntries on a peer +// to ensure they don't time out. This is done async of replicate(), +// since that routine could potentially be blocked on disk IO. +func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { + var failures uint64 + req := AppendEntriesRequest{ + RPCHeader: r.getRPCHeader(), + Term: s.currentTerm, + Leader: r.trans.EncodePeer(r.localID, r.localAddr), + } + var resp AppendEntriesResponse + for { + // Wait for the next heartbeat interval or forced notify + select { + case <-s.notifyCh: + case <-randomTimeout(r.conf.HeartbeatTimeout / 10): + case <-stopCh: + return + } + + start := time.Now() + if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil { + r.logger.Printf("[ERR] raft: Failed to heartbeat to %v: %v", s.peer.Address, err) + failures++ + select { + case <-time.After(backoff(failureWait, failures, maxFailureScale)): + case <-stopCh: + } + } else { + s.setLastContact() + failures = 0 + metrics.MeasureSince([]string{"raft", "replication", "heartbeat", string(s.peer.ID)}, start) + s.notifyAll(resp.Success) + } + } +} + +// pipelineReplicate is used when we have synchronized our state with the follower, +// and want to switch to a higher performance pipeline mode of replication. +// We only pipeline AppendEntries commands, and if we ever hit an error, we fall +// back to the standard replication which can handle more complex situations. 
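Both heartbeat above and replicateTo earlier wait backoff(failureWait, failures, maxFailureScale) between failed RPCs. With the constants declared at the top of this file the delays grow roughly as 10ms, 10ms, 20ms, 40ms, and so on, capping at 10ms * 2^10, about 10.2s. A small sketch that prints the schedule, assuming the backoff helper defined later in util.go:

// printBackoffSchedule is illustrative only: it shows the retry delays for the
// first 14 consecutive failures using this file's failureWait/maxFailureScale.
func printBackoffSchedule() {
	for failures := uint64(1); failures <= 14; failures++ {
		fmt.Println(failures, backoff(failureWait, failures, maxFailureScale))
	}
}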
+func (r *Raft) pipelineReplicate(s *followerReplication) error { + // Create a new pipeline + pipeline, err := r.trans.AppendEntriesPipeline(s.peer.ID, s.peer.Address) + if err != nil { + return err + } + defer pipeline.Close() + + // Log start and stop of pipeline + r.logger.Printf("[INFO] raft: pipelining replication to peer %v", s.peer) + defer r.logger.Printf("[INFO] raft: aborting pipeline replication to peer %v", s.peer) + + // Create a shutdown and finish channel + stopCh := make(chan struct{}) + finishCh := make(chan struct{}) + + // Start a dedicated decoder + r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) + + // Start pipeline sends at the last good nextIndex + nextIndex := s.nextIndex + + shouldStop := false +SEND: + for !shouldStop { + select { + case <-finishCh: + break SEND + case maxIndex := <-s.stopCh: + // Make a best effort to replicate up to this index + if maxIndex > 0 { + r.pipelineSend(s, pipeline, &nextIndex, maxIndex) + } + break SEND + case <-s.triggerCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + case <-randomTimeout(r.conf.CommitTimeout): + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + } + } + + // Stop our decoder, and wait for it to finish + close(stopCh) + select { + case <-finishCh: + case <-r.shutdownCh: + } + return nil +} + +// pipelineSend is used to send data over a pipeline. It is a helper to +// pipelineReplicate. +func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) { + // Create a new append request + req := new(AppendEntriesRequest) + if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil { + return true + } + + // Pipeline the append entries + if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { + r.logger.Printf("[ERR] raft: Failed to pipeline AppendEntries to %v: %v", s.peer, err) + return true + } + + // Increase the next send log to avoid re-sending old logs + if n := len(req.Entries); n > 0 { + last := req.Entries[n-1] + *nextIdx = last.Index + 1 + } + return false +} + +// pipelineDecode is used to decode the responses of pipelined requests. +func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) { + defer close(finishCh) + respCh := p.Consumer() + for { + select { + case ready := <-respCh: + req, resp := ready.Request(), ready.Response() + appendStats(string(s.peer.ID), ready.Start(), float32(len(req.Entries))) + + // Check for a newer term, stop running + if resp.Term > req.Term { + r.handleStaleTerm(s) + return + } + + // Update the last contact + s.setLastContact() + + // Abort pipeline if not successful + if !resp.Success { + return + } + + // Update our replication state + updateLastAppended(s, req) + case <-stopCh: + return + } + } +} + +// setupAppendEntries is used to setup an append entries request. 
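pipelineReplicate above splits the work between a sender (pipelineSend) and a dedicated decoder goroutine (pipelineDecode) so requests keep flowing while responses are still in flight. A hedged, stripped-down sketch of that split against the AppendPipeline interface defined later in transport.go; error handling and the stop-index bookkeeping are elided:

// pipelineSketch is illustrative only: one goroutine drains response futures
// while the caller keeps the pipe full without waiting on each reply.
func pipelineSketch(p AppendPipeline, reqs []*AppendEntriesRequest, stopCh chan struct{}) {
	go func() {
		for {
			select {
			case fut := <-p.Consumer():
				if fut.Error() == nil {
					_ = fut.Response() // e.g. update nextIndex / commitment here
				}
			case <-stopCh:
				return
			}
		}
	}()
	for _, req := range reqs {
		if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil {
			break // fall back to non-pipelined replication on any error
		}
	}
}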
+func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + req.RPCHeader = r.getRPCHeader() + req.Term = s.currentTerm + req.Leader = r.trans.EncodePeer(r.localID, r.localAddr) + req.LeaderCommitIndex = r.getCommitIndex() + if err := r.setPreviousLog(req, nextIndex); err != nil { + return err + } + if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil { + return err + } + return nil +} + +// setPreviousLog is used to setup the PrevLogEntry and PrevLogTerm for an +// AppendEntriesRequest given the next index to replicate. +func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error { + // Guard for the first index, since there is no 0 log entry + // Guard against the previous index being a snapshot as well + lastSnapIdx, lastSnapTerm := r.getLastSnapshot() + if nextIndex == 1 { + req.PrevLogEntry = 0 + req.PrevLogTerm = 0 + + } else if (nextIndex - 1) == lastSnapIdx { + req.PrevLogEntry = lastSnapIdx + req.PrevLogTerm = lastSnapTerm + + } else { + var l Log + if err := r.logs.GetLog(nextIndex-1, &l); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", + nextIndex-1, err) + return err + } + + // Set the previous index and term (0 if nextIndex is 1) + req.PrevLogEntry = l.Index + req.PrevLogTerm = l.Term + } + return nil +} + +// setNewLogs is used to setup the logs which should be appended for a request. +func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { + // Append up to MaxAppendEntries or up to the lastIndex + req.Entries = make([]*Log, 0, r.conf.MaxAppendEntries) + maxIndex := min(nextIndex+uint64(r.conf.MaxAppendEntries)-1, lastIndex) + for i := nextIndex; i <= maxIndex; i++ { + oldLog := new(Log) + if err := r.logs.GetLog(i, oldLog); err != nil { + r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", i, err) + return err + } + req.Entries = append(req.Entries, oldLog) + } + return nil +} + +// appendStats is used to emit stats about an AppendEntries invocation. +func appendStats(peer string, start time.Time, logs float32) { + metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start) + metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs) +} + +// handleStaleTerm is used when a follower indicates that we have a stale term. +func (r *Raft) handleStaleTerm(s *followerReplication) { + r.logger.Printf("[ERR] raft: peer %v has newer term, stopping replication", s.peer) + s.notifyAll(false) // No longer leader + asyncNotifyCh(s.stepDown) +} + +// updateLastAppended is used to update follower replication state after a +// successful AppendEntries RPC. +// TODO: This isn't used during InstallSnapshot, but the code there is similar. +func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) { + // Mark any inflight logs as committed + if logs := req.Entries; len(logs) > 0 { + last := logs[len(logs)-1] + s.nextIndex = last.Index + 1 + s.commitment.match(s.peer.ID, last.Index) + } + + // Notify still leader + s.notifyAll(true) +} diff --git a/vendor/github.com/hashicorp/raft/snapshot.go b/vendor/github.com/hashicorp/raft/snapshot.go new file mode 100644 index 0000000..5287ebc --- /dev/null +++ b/vendor/github.com/hashicorp/raft/snapshot.go @@ -0,0 +1,239 @@ +package raft + +import ( + "fmt" + "io" + "time" + + "github.com/armon/go-metrics" +) + +// SnapshotMeta is for metadata of a snapshot. 
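setNewLogs above bounds each request to MaxAppendEntries entries: the window runs from nextIndex to min(nextIndex+MaxAppendEntries-1, lastIndex). With hypothetical numbers, nextIndex=100, MaxAppendEntries=64 and lastIndex=500 yields entries 100..163, while lastIndex=120 yields 100..120. The same bound restated as a tiny helper, illustrative only:

// appendWindow mirrors the bound computed in setNewLogs; it is not library API.
func appendWindow(nextIndex, lastIndex uint64, maxAppendEntries int) (first, last uint64) {
	return nextIndex, min(nextIndex+uint64(maxAppendEntries)-1, lastIndex)
}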
+type SnapshotMeta struct { + // Version is the version number of the snapshot metadata. This does not cover + // the application's data in the snapshot, that should be versioned + // separately. + Version SnapshotVersion + + // ID is opaque to the store, and is used for opening. + ID string + + // Index and Term store when the snapshot was taken. + Index uint64 + Term uint64 + + // Peers is deprecated and used to support version 0 snapshots, but will + // be populated in version 1 snapshots as well to help with upgrades. + Peers []byte + + // Configuration and ConfigurationIndex are present in version 1 + // snapshots and later. + Configuration Configuration + ConfigurationIndex uint64 + + // Size is the size of the snapshot in bytes. + Size int64 +} + +// SnapshotStore interface is used to allow for flexible implementations +// of snapshot storage and retrieval. For example, a client could implement +// a shared state store such as S3, allowing new nodes to restore snapshots +// without streaming from the leader. +type SnapshotStore interface { + // Create is used to begin a snapshot at a given index and term, and with + // the given committed configuration. The version parameter controls + // which snapshot version to create. + Create(version SnapshotVersion, index, term uint64, configuration Configuration, + configurationIndex uint64, trans Transport) (SnapshotSink, error) + + // List is used to list the available snapshots in the store. + // It should return then in descending order, with the highest index first. + List() ([]*SnapshotMeta, error) + + // Open takes a snapshot ID and provides a ReadCloser. Once close is + // called it is assumed the snapshot is no longer needed. + Open(id string) (*SnapshotMeta, io.ReadCloser, error) +} + +// SnapshotSink is returned by StartSnapshot. The FSM will Write state +// to the sink and call Close on completion. On error, Cancel will be invoked. +type SnapshotSink interface { + io.WriteCloser + ID() string + Cancel() error +} + +// runSnapshots is a long running goroutine used to manage taking +// new snapshots of the FSM. It runs in parallel to the FSM and +// main goroutines, so that snapshots do not block normal operation. +func (r *Raft) runSnapshots() { + for { + select { + case <-randomTimeout(r.conf.SnapshotInterval): + // Check if we should snapshot + if !r.shouldSnapshot() { + continue + } + + // Trigger a snapshot + if _, err := r.takeSnapshot(); err != nil { + r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + } + + case future := <-r.userSnapshotCh: + // User-triggered, run immediately + id, err := r.takeSnapshot() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + } else { + future.opener = func() (*SnapshotMeta, io.ReadCloser, error) { + return r.snapshots.Open(id) + } + } + future.respond(err) + + case <-r.shutdownCh: + return + } + } +} + +// shouldSnapshot checks if we meet the conditions to take +// a new snapshot. +func (r *Raft) shouldSnapshot() bool { + // Check the last snapshot index + lastSnap, _ := r.getLastSnapshot() + + // Check the last log index + lastIdx, err := r.logs.LastIndex() + if err != nil { + r.logger.Printf("[ERR] raft: Failed to get last log index: %v", err) + return false + } + + // Compare the delta to the threshold + delta := lastIdx - lastSnap + return delta >= r.conf.SnapshotThreshold +} + +// takeSnapshot is used to take a new snapshot. This must only be called from +// the snapshot thread, never the main thread. 
This returns the ID of the new +// snapshot, along with an error. +func (r *Raft) takeSnapshot() (string, error) { + defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now()) + + // Create a request for the FSM to perform a snapshot. + snapReq := &reqSnapshotFuture{} + snapReq.init() + + // Wait for dispatch or shutdown. + select { + case r.fsmSnapshotCh <- snapReq: + case <-r.shutdownCh: + return "", ErrRaftShutdown + } + + // Wait until we get a response + if err := snapReq.Error(); err != nil { + if err != ErrNothingNewToSnapshot { + err = fmt.Errorf("failed to start snapshot: %v", err) + } + return "", err + } + defer snapReq.snapshot.Release() + + // Make a request for the configurations and extract the committed info. + // We have to use the future here to safely get this information since + // it is owned by the main thread. + configReq := &configurationsFuture{} + configReq.init() + select { + case r.configurationsCh <- configReq: + case <-r.shutdownCh: + return "", ErrRaftShutdown + } + if err := configReq.Error(); err != nil { + return "", err + } + committed := configReq.configurations.committed + committedIndex := configReq.configurations.committedIndex + + // We don't support snapshots while there's a config change outstanding + // since the snapshot doesn't have a means to represent this state. This + // is a little weird because we need the FSM to apply an index that's + // past the configuration change, even though the FSM itself doesn't see + // the configuration changes. It should be ok in practice with normal + // application traffic flowing through the FSM. If there's none of that + // then it's not crucial that we snapshot, since there's not much going + // on Raft-wise. + if snapReq.index < committedIndex { + return "", fmt.Errorf("cannot take snapshot now, wait until the configuration entry at %v has been applied (have applied %v)", + committedIndex, snapReq.index) + } + + // Create a new snapshot. + r.logger.Printf("[INFO] raft: Starting snapshot up to %d", snapReq.index) + start := time.Now() + version := getSnapshotVersion(r.protocolVersion) + sink, err := r.snapshots.Create(version, snapReq.index, snapReq.term, committed, committedIndex, r.trans) + if err != nil { + return "", fmt.Errorf("failed to create snapshot: %v", err) + } + metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start) + + // Try to persist the snapshot. + start = time.Now() + if err := snapReq.snapshot.Persist(sink); err != nil { + sink.Cancel() + return "", fmt.Errorf("failed to persist snapshot: %v", err) + } + metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start) + + // Close and check for error. + if err := sink.Close(); err != nil { + return "", fmt.Errorf("failed to close snapshot: %v", err) + } + + // Update the last stable snapshot info. + r.setLastSnapshot(snapReq.index, snapReq.term) + + // Compact the logs. + if err := r.compactLogs(snapReq.index); err != nil { + return "", err + } + + r.logger.Printf("[INFO] raft: Snapshot to %d complete", snapReq.index) + return sink.ID(), nil +} + +// compactLogs takes the last inclusive index of a snapshot +// and trims the logs that are no longer needed. 
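compactLogs below truncates up to min(snapIdx, lastLogIdx - TrailingLogs) and returns early when lastLogIdx <= TrailingLogs. With hypothetical numbers snapIdx=1000, lastLogIdx=1040 and TrailingLogs=100, the bound is min(1000, 940) = 940, so entries 941..1040 survive: at least TrailingLogs entries are kept and nothing newer than the snapshot is removed. The same bound as a helper, illustrative only:

// compactionUpperBound restates the truncation bound from compactLogs. Callers
// must apply the same lastLogIdx <= trailingLogs guard first to avoid underflow.
func compactionUpperBound(snapIdx, lastLogIdx, trailingLogs uint64) uint64 {
	return min(snapIdx, lastLogIdx-trailingLogs)
}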
+func (r *Raft) compactLogs(snapIdx uint64) error { + defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now()) + // Determine log ranges to compact + minLog, err := r.logs.FirstIndex() + if err != nil { + return fmt.Errorf("failed to get first log index: %v", err) + } + + // Check if we have enough logs to truncate + lastLogIdx, _ := r.getLastLog() + if lastLogIdx <= r.conf.TrailingLogs { + return nil + } + + // Truncate up to the end of the snapshot, or `TrailingLogs` + // back from the head, which ever is further back. This ensures + // at least `TrailingLogs` entries, but does not allow logs + // after the snapshot to be removed. + maxLog := min(snapIdx, lastLogIdx-r.conf.TrailingLogs) + + // Log this + r.logger.Printf("[INFO] raft: Compacting logs from %d to %d", minLog, maxLog) + + // Compact the logs + if err := r.logs.DeleteRange(minLog, maxLog); err != nil { + return fmt.Errorf("log compaction failed: %v", err) + } + return nil +} diff --git a/vendor/github.com/hashicorp/raft/stable.go b/vendor/github.com/hashicorp/raft/stable.go new file mode 100644 index 0000000..ff59a8c --- /dev/null +++ b/vendor/github.com/hashicorp/raft/stable.go @@ -0,0 +1,15 @@ +package raft + +// StableStore is used to provide stable storage +// of key configurations to ensure safety. +type StableStore interface { + Set(key []byte, val []byte) error + + // Get returns the value for key, or an empty byte slice if key was not found. + Get(key []byte) ([]byte, error) + + SetUint64(key []byte, val uint64) error + + // GetUint64 returns the uint64 value for key, or 0 if key was not found. + GetUint64(key []byte) (uint64, error) +} diff --git a/vendor/github.com/hashicorp/raft/state.go b/vendor/github.com/hashicorp/raft/state.go new file mode 100644 index 0000000..a58cd0d --- /dev/null +++ b/vendor/github.com/hashicorp/raft/state.go @@ -0,0 +1,171 @@ +package raft + +import ( + "sync" + "sync/atomic" +) + +// RaftState captures the state of a Raft node: Follower, Candidate, Leader, +// or Shutdown. +type RaftState uint32 + +const ( + // Follower is the initial state of a Raft node. + Follower RaftState = iota + + // Candidate is one of the valid states of a Raft node. + Candidate + + // Leader is one of the valid states of a Raft node. + Leader + + // Shutdown is the terminal state of a Raft node. + Shutdown +) + +func (s RaftState) String() string { + switch s { + case Follower: + return "Follower" + case Candidate: + return "Candidate" + case Leader: + return "Leader" + case Shutdown: + return "Shutdown" + default: + return "Unknown" + } +} + +// raftState is used to maintain various state variables +// and provides an interface to set/get the variables in a +// thread safe manner. +type raftState struct { + // currentTerm commitIndex, lastApplied, must be kept at the top of + // the struct so they're 64 bit aligned which is a requirement for + // atomic ops on 32 bit platforms. 
+ + // The current term, cache of StableStore + currentTerm uint64 + + // Highest committed log entry + commitIndex uint64 + + // Last applied log to the FSM + lastApplied uint64 + + // protects 4 next fields + lastLock sync.Mutex + + // Cache the latest snapshot index/term + lastSnapshotIndex uint64 + lastSnapshotTerm uint64 + + // Cache the latest log from LogStore + lastLogIndex uint64 + lastLogTerm uint64 + + // Tracks running goroutines + routinesGroup sync.WaitGroup + + // The current state + state RaftState +} + +func (r *raftState) getState() RaftState { + stateAddr := (*uint32)(&r.state) + return RaftState(atomic.LoadUint32(stateAddr)) +} + +func (r *raftState) setState(s RaftState) { + stateAddr := (*uint32)(&r.state) + atomic.StoreUint32(stateAddr, uint32(s)) +} + +func (r *raftState) getCurrentTerm() uint64 { + return atomic.LoadUint64(&r.currentTerm) +} + +func (r *raftState) setCurrentTerm(term uint64) { + atomic.StoreUint64(&r.currentTerm, term) +} + +func (r *raftState) getLastLog() (index, term uint64) { + r.lastLock.Lock() + index = r.lastLogIndex + term = r.lastLogTerm + r.lastLock.Unlock() + return +} + +func (r *raftState) setLastLog(index, term uint64) { + r.lastLock.Lock() + r.lastLogIndex = index + r.lastLogTerm = term + r.lastLock.Unlock() +} + +func (r *raftState) getLastSnapshot() (index, term uint64) { + r.lastLock.Lock() + index = r.lastSnapshotIndex + term = r.lastSnapshotTerm + r.lastLock.Unlock() + return +} + +func (r *raftState) setLastSnapshot(index, term uint64) { + r.lastLock.Lock() + r.lastSnapshotIndex = index + r.lastSnapshotTerm = term + r.lastLock.Unlock() +} + +func (r *raftState) getCommitIndex() uint64 { + return atomic.LoadUint64(&r.commitIndex) +} + +func (r *raftState) setCommitIndex(index uint64) { + atomic.StoreUint64(&r.commitIndex, index) +} + +func (r *raftState) getLastApplied() uint64 { + return atomic.LoadUint64(&r.lastApplied) +} + +func (r *raftState) setLastApplied(index uint64) { + atomic.StoreUint64(&r.lastApplied, index) +} + +// Start a goroutine and properly handle the race between a routine +// starting and incrementing, and exiting and decrementing. +func (r *raftState) goFunc(f func()) { + r.routinesGroup.Add(1) + go func() { + defer r.routinesGroup.Done() + f() + }() +} + +func (r *raftState) waitShutdown() { + r.routinesGroup.Wait() +} + +// getLastIndex returns the last index in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastIndex() uint64 { + r.lastLock.Lock() + defer r.lastLock.Unlock() + return max(r.lastLogIndex, r.lastSnapshotIndex) +} + +// getLastEntry returns the last index and term in stable storage. +// Either from the last log or from the last snapshot. +func (r *raftState) getLastEntry() (uint64, uint64) { + r.lastLock.Lock() + defer r.lastLock.Unlock() + if r.lastLogIndex >= r.lastSnapshotIndex { + return r.lastLogIndex, r.lastLogTerm + } + return r.lastSnapshotIndex, r.lastSnapshotTerm +} diff --git a/vendor/github.com/hashicorp/raft/tag.sh b/vendor/github.com/hashicorp/raft/tag.sh new file mode 100644 index 0000000..cd16623 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/tag.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -e + +# The version must be supplied from the environment. Do not include the +# leading "v". +if [ -z $VERSION ]; then + echo "Please specify a version." + exit 1 +fi + +# Generate the tag. +echo "==> Tagging version $VERSION..." 
+git commit --allow-empty -a --gpg-sign=348FFC4C -m "Release v$VERSION" +git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" master + +exit 0 diff --git a/vendor/github.com/hashicorp/raft/tcp_transport.go b/vendor/github.com/hashicorp/raft/tcp_transport.go new file mode 100644 index 0000000..69c928e --- /dev/null +++ b/vendor/github.com/hashicorp/raft/tcp_transport.go @@ -0,0 +1,116 @@ +package raft + +import ( + "errors" + "io" + "log" + "net" + "time" +) + +var ( + errNotAdvertisable = errors.New("local bind address is not advertisable") + errNotTCP = errors.New("local address is not a TCP address") +) + +// TCPStreamLayer implements StreamLayer interface for plain TCP. +type TCPStreamLayer struct { + advertise net.Addr + listener *net.TCPListener +} + +// NewTCPTransport returns a NetworkTransport that is built on top of +// a TCP streaming transport layer. +func NewTCPTransport( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logOutput io.Writer, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransport(stream, maxPool, timeout, logOutput) + }) +} + +// NewTCPTransportWithLogger returns a NetworkTransport that is built on top of +// a TCP streaming transport layer, with log output going to the supplied Logger +func NewTCPTransportWithLogger( + bindAddr string, + advertise net.Addr, + maxPool int, + timeout time.Duration, + logger *log.Logger, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + return NewNetworkTransportWithLogger(stream, maxPool, timeout, logger) + }) +} + +// NewTCPTransportWithConfig returns a NetworkTransport that is built on top of +// a TCP streaming transport layer, using the given config struct. +func NewTCPTransportWithConfig( + bindAddr string, + advertise net.Addr, + config *NetworkTransportConfig, +) (*NetworkTransport, error) { + return newTCPTransport(bindAddr, advertise, func(stream StreamLayer) *NetworkTransport { + config.Stream = stream + return NewNetworkTransportWithConfig(config) + }) +} + +func newTCPTransport(bindAddr string, + advertise net.Addr, + transportCreator func(stream StreamLayer) *NetworkTransport) (*NetworkTransport, error) { + // Try to bind + list, err := net.Listen("tcp", bindAddr) + if err != nil { + return nil, err + } + + // Create stream + stream := &TCPStreamLayer{ + advertise: advertise, + listener: list.(*net.TCPListener), + } + + // Verify that we have a usable advertise address + addr, ok := stream.Addr().(*net.TCPAddr) + if !ok { + list.Close() + return nil, errNotTCP + } + if addr.IP.IsUnspecified() { + list.Close() + return nil, errNotAdvertisable + } + + // Create the network transport + trans := transportCreator(stream) + return trans, nil +} + +// Dial implements the StreamLayer interface. +func (t *TCPStreamLayer) Dial(address ServerAddress, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", string(address), timeout) +} + +// Accept implements the net.Listener interface. +func (t *TCPStreamLayer) Accept() (c net.Conn, err error) { + return t.listener.Accept() +} + +// Close implements the net.Listener interface. +func (t *TCPStreamLayer) Close() (err error) { + return t.listener.Close() +} + +// Addr implements the net.Listener interface. 
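A hedged usage sketch for the constructors above; the addresses, pool size and timeout are made up, and an os import is assumed. Binding to 0.0.0.0 requires an explicit advertise address, otherwise newTCPTransport returns errNotAdvertisable:

// newExampleTransport is illustrative only; values are placeholders.
func newExampleTransport() (*NetworkTransport, error) {
	advertise, err := net.ResolveTCPAddr("tcp", "10.0.0.1:8300")
	if err != nil {
		return nil, err
	}
	// 3 pooled connections, 10s I/O timeout, logs to stderr.
	return NewTCPTransport("0.0.0.0:8300", advertise, 3, 10*time.Second, os.Stderr)
}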
+func (t *TCPStreamLayer) Addr() net.Addr { + // Use an advertise addr if provided + if t.advertise != nil { + return t.advertise + } + return t.listener.Addr() +} diff --git a/vendor/github.com/hashicorp/raft/transport.go b/vendor/github.com/hashicorp/raft/transport.go new file mode 100644 index 0000000..85459b2 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/transport.go @@ -0,0 +1,124 @@ +package raft + +import ( + "io" + "time" +) + +// RPCResponse captures both a response and a potential error. +type RPCResponse struct { + Response interface{} + Error error +} + +// RPC has a command, and provides a response mechanism. +type RPC struct { + Command interface{} + Reader io.Reader // Set only for InstallSnapshot + RespChan chan<- RPCResponse +} + +// Respond is used to respond with a response, error or both +func (r *RPC) Respond(resp interface{}, err error) { + r.RespChan <- RPCResponse{resp, err} +} + +// Transport provides an interface for network transports +// to allow Raft to communicate with other nodes. +type Transport interface { + // Consumer returns a channel that can be used to + // consume and respond to RPC requests. + Consumer() <-chan RPC + + // LocalAddr is used to return our local address to distinguish from our peers. + LocalAddr() ServerAddress + + // AppendEntriesPipeline returns an interface that can be used to pipeline + // AppendEntries requests. + AppendEntriesPipeline(id ServerID, target ServerAddress) (AppendPipeline, error) + + // AppendEntries sends the appropriate RPC to the target node. + AppendEntries(id ServerID, target ServerAddress, args *AppendEntriesRequest, resp *AppendEntriesResponse) error + + // RequestVote sends the appropriate RPC to the target node. + RequestVote(id ServerID, target ServerAddress, args *RequestVoteRequest, resp *RequestVoteResponse) error + + // InstallSnapshot is used to push a snapshot down to a follower. The data is read from + // the ReadCloser and streamed to the client. + InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error + + // EncodePeer is used to serialize a peer's address. + EncodePeer(id ServerID, addr ServerAddress) []byte + + // DecodePeer is used to deserialize a peer's address. + DecodePeer([]byte) ServerAddress + + // SetHeartbeatHandler is used to setup a heartbeat handler + // as a fast-pass. This is to avoid head-of-line blocking from + // disk IO. If a Transport does not support this, it can simply + // ignore the call, and push the heartbeat onto the Consumer channel. + SetHeartbeatHandler(cb func(rpc RPC)) +} + +// WithClose is an interface that a transport may provide which +// allows a transport to be shut down cleanly when a Raft instance +// shuts down. +// +// It is defined separately from Transport as unfortunately it wasn't in the +// original interface specification. +type WithClose interface { + // Close permanently closes a transport, stopping + // any associated goroutines and freeing other resources. + Close() error +} + +// LoopbackTransport is an interface that provides a loopback transport suitable for testing +// e.g. InmemTransport. It's there so we don't have to rewrite tests. +type LoopbackTransport interface { + Transport // Embedded transport reference + WithPeers // Embedded peer management + WithClose // with a close routine +} + +// WithPeers is an interface that a transport may provide which allows for connection and +// disconnection. 
Unless the transport is a loopback transport, the transport specified to +// "Connect" is likely to be nil. +type WithPeers interface { + Connect(peer ServerAddress, t Transport) // Connect a peer + Disconnect(peer ServerAddress) // Disconnect a given peer + DisconnectAll() // Disconnect all peers, possibly to reconnect them later +} + +// AppendPipeline is used for pipelining AppendEntries requests. It is used +// to increase the replication throughput by masking latency and better +// utilizing bandwidth. +type AppendPipeline interface { + // AppendEntries is used to add another request to the pipeline. + // The send may block which is an effective form of back-pressure. + AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) + + // Consumer returns a channel that can be used to consume + // response futures when they are ready. + Consumer() <-chan AppendFuture + + // Close closes the pipeline and cancels all inflight RPCs + Close() error +} + +// AppendFuture is used to return information about a pipelined AppendEntries request. +type AppendFuture interface { + Future + + // Start returns the time that the append request was started. + // It is always OK to call this method. + Start() time.Time + + // Request holds the parameters of the AppendEntries call. + // It is always OK to call this method. + Request() *AppendEntriesRequest + + // Response holds the results of the AppendEntries call. + // This method must only be called after the Error + // method returns, and will only be valid on success. + Response() *AppendEntriesResponse +} diff --git a/vendor/github.com/hashicorp/raft/util.go b/vendor/github.com/hashicorp/raft/util.go new file mode 100644 index 0000000..90428d7 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/util.go @@ -0,0 +1,133 @@ +package raft + +import ( + "bytes" + crand "crypto/rand" + "fmt" + "math" + "math/big" + "math/rand" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +func init() { + // Ensure we use a high-entropy seed for the psuedo-random generator + rand.Seed(newSeed()) +} + +// returns an int64 from a crypto random source +// can be used to seed a source for a math/rand. +func newSeed() int64 { + r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + return r.Int64() +} + +// randomTimeout returns a value that is between the minVal and 2x minVal. +func randomTimeout(minVal time.Duration) <-chan time.Time { + if minVal == 0 { + return nil + } + extra := (time.Duration(rand.Int63()) % minVal) + return time.After(minVal + extra) +} + +// min returns the minimum. +func min(a, b uint64) uint64 { + if a <= b { + return a + } + return b +} + +// max returns the maximum. +func max(a, b uint64) uint64 { + if a >= b { + return a + } + return b +} + +// generateUUID is used to generate a random UUID. +func generateUUID() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +// asyncNotifyCh is used to do an async channel send +// to a single channel without blocking. +func asyncNotifyCh(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// drainNotifyCh empties out a single-item notification channel without +// blocking, and returns whether it received anything. 
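randomTimeout above is the source of the jitter used for elections, heartbeats and snapshot checks: a minimum of d yields a timer that fires somewhere in [d, 2d), and a zero duration yields a nil channel that never fires, which is convenient inside a select. A small illustrative use:

// jitterExample is illustrative only: the first case fires 100-200ms after the
// call, so the one-second fallback never wins in practice.
func jitterExample() {
	select {
	case <-randomTimeout(100 * time.Millisecond):
		// jittered timer fired
	case <-time.After(time.Second):
		// fallback path, shown only to make the select well-formed
	}
}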
+func drainNotifyCh(ch chan struct{}) bool { + select { + case <-ch: + return true + default: + return false + } +} + +// asyncNotifyBool is used to do an async notification +// on a bool channel. +func asyncNotifyBool(ch chan bool, v bool) { + select { + case ch <- v: + default: + } +} + +// Decode reverses the encode operation on a byte slice input. +func decodeMsgPack(buf []byte, out interface{}) error { + r := bytes.NewBuffer(buf) + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(r, &hd) + return dec.Decode(out) +} + +// Encode writes an encoded object to a new bytes buffer. +func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(buf, &hd) + err := enc.Encode(in) + return buf, err +} + +// backoff is used to compute an exponential backoff +// duration. Base time is scaled by the current round, +// up to some maximum scale factor. +func backoff(base time.Duration, round, limit uint64) time.Duration { + power := min(round, limit) + for power > 2 { + base *= 2 + power-- + } + return base +} + +// Needed for sorting []uint64, used to determine commitment +type uint64Slice []uint64 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go new file mode 100644 index 0000000..3582ee4 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -0,0 +1,243 @@ +package coordinate + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +// Client manages the estimated network coordinate for a given node, and adjusts +// it as the node observes round trip times and estimated coordinates from other +// nodes. The core algorithm is based on Vivaldi, see the documentation for Config +// for more details. +type Client struct { + // coord is the current estimate of the client's network coordinate. + coord *Coordinate + + // origin is a coordinate sitting at the origin. + origin *Coordinate + + // config contains the tuning parameters that govern the performance of + // the algorithm. + config *Config + + // adjustmentIndex is the current index into the adjustmentSamples slice. + adjustmentIndex uint + + // adjustment is used to store samples for the adjustment calculation. + adjustmentSamples []float64 + + // latencyFilterSamples is used to store the last several RTT samples, + // keyed by node name. We will use the config's LatencyFilterSamples + // value to determine how many samples we keep, per node. + latencyFilterSamples map[string][]float64 + + // stats is used to record events that occur when updating coordinates. + stats ClientStats + + // mutex enables safe concurrent access to the client. + mutex sync.RWMutex +} + +// ClientStats is used to record events that occur when updating coordinates. +type ClientStats struct { + // Resets is incremented any time we reset our local coordinate because + // our calculations have resulted in an invalid state. + Resets int +} + +// NewClient creates a new Client and verifies the configuration is valid. +func NewClient(config *Config) (*Client, error) { + if !(config.Dimensionality > 0) { + return nil, fmt.Errorf("dimensionality must be >0") + } + + return &Client{ + coord: NewCoordinate(config), + origin: NewCoordinate(config), + config: config, + adjustmentIndex: 0, + adjustmentSamples: make([]float64, config.AdjustmentWindowSize), + latencyFilterSamples: make(map[string][]float64), + }, nil +} + +// GetCoordinate returns a copy of the coordinate for this client. +func (c *Client) GetCoordinate() *Coordinate { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.coord.Clone() +} + +// SetCoordinate forces the client's coordinate to a known state. +func (c *Client) SetCoordinate(coord *Coordinate) error { + c.mutex.Lock() + defer c.mutex.Unlock() + + if err := c.checkCoordinate(coord); err != nil { + return err + } + + c.coord = coord.Clone() + return nil +} + +// ForgetNode removes any client state for the given node. +func (c *Client) ForgetNode(node string) { + c.mutex.Lock() + defer c.mutex.Unlock() + + delete(c.latencyFilterSamples, node) +} + +// Stats returns a copy of stats for the client. +func (c *Client) Stats() ClientStats { + c.mutex.Lock() + defer c.mutex.Unlock() + + return c.stats +} + +// checkCoordinate returns an error if the coordinate isn't compatible with +// this client, or if the coordinate itself isn't valid. 
This assumes the mutex
+// has been locked already.
+func (c *Client) checkCoordinate(coord *Coordinate) error {
+	if !c.coord.IsCompatibleWith(coord) {
+		return fmt.Errorf("dimensions aren't compatible")
+	}
+
+	if !coord.IsValid() {
+		return fmt.Errorf("coordinate is invalid")
+	}
+
+	return nil
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+	samples, ok := c.latencyFilterSamples[node]
+	if !ok {
+		samples = make([]float64, 0, c.config.LatencyFilterSize)
+	}
+
+	// Add the new sample and trim the list, if needed.
+	samples = append(samples, rttSeconds)
+	if len(samples) > int(c.config.LatencyFilterSize) {
+		samples = samples[1:]
+	}
+	c.latencyFilterSamples[node] = samples
+
+	// Sort a copy of the samples and return the median.
+	sorted := make([]float64, len(samples))
+	copy(sorted, samples)
+	sort.Float64s(sorted)
+	return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
+	const zeroThreshold = 1.0e-6
+
+	dist := c.coord.DistanceTo(other).Seconds()
+	if rttSeconds < zeroThreshold {
+		rttSeconds = zeroThreshold
+	}
+	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
+
+	totalError := c.coord.Error + other.Error
+	if totalError < zeroThreshold {
+		totalError = zeroThreshold
+	}
+	weight := c.coord.Error / totalError
+
+	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
+	if c.coord.Error > c.config.VivaldiErrorMax {
+		c.coord.Error = c.config.VivaldiErrorMax
+	}
+
+	delta := c.config.VivaldiCC * weight
+	force := delta * (rttSeconds - dist)
+	c.coord = c.coord.ApplyForce(c.config, force, other)
+}
+
+// updateAdjustment updates the adjustment portion of the client's coordinate, if
+// the feature is enabled. This assumes that the mutex has been locked already.
+func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
+	if c.config.AdjustmentWindowSize == 0 {
+		return
+	}
+
+	// Note that the existing adjustment factors don't figure into this
+	// calculation so we use the raw distance here.
+	dist := c.coord.rawDistanceTo(other)
+	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
+	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
+
+	sum := 0.0
+	for _, sample := range c.adjustmentSamples {
+		sum += sample
+	}
+	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
+}
+
+// updateGravity applies a small amount of gravity to pull coordinates towards
+// the center of the coordinate system to combat drift. This assumes that the
+// mutex is locked already.
+func (c *Client) updateGravity() {
+	dist := c.origin.DistanceTo(c.coord).Seconds()
+	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
+	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
+}
+
+// Update takes other, a coordinate for another node, and rtt, a round trip
+// time observation for a ping to that node, and updates the estimated position of
+// the client's coordinate. Returns the updated coordinate.
+func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + if err := c.checkCoordinate(other); err != nil { + return nil, err + } + + // The code down below can handle zero RTTs, which we have seen in + // https://github.com/hashicorp/consul/issues/3789, presumably in + // environments with coarse-grained monotonic clocks (we are still + // trying to pin this down). In any event, this is ok from a code PoV + // so we don't need to alert operators with spammy messages. We did + // add a counter so this is still observable, though. + const maxRTT = 10 * time.Second + if rtt < 0 || rtt > maxRTT { + return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT) + } + if rtt == 0 { + metrics.IncrCounter([]string{"serf", "coordinate", "zero-rtt"}, 1) + } + + rttSeconds := c.latencyFilter(node, rtt.Seconds()) + c.updateVivaldi(other, rttSeconds) + c.updateAdjustment(other, rttSeconds) + c.updateGravity() + if !c.coord.IsValid() { + c.stats.Resets++ + c.coord = NewCoordinate(c.config) + } + + return c.coord.Clone(), nil +} + +// DistanceTo returns the estimated RTT from the client's coordinate to other, the +// coordinate for another node. +func (c *Client) DistanceTo(other *Coordinate) time.Duration { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.coord.DistanceTo(other) +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go new file mode 100644 index 0000000..b85a8ab --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/config.go @@ -0,0 +1,70 @@ +package coordinate + +// Config is used to set the parameters of the Vivaldi-based coordinate mapping +// algorithm. +// +// The following references are called out at various points in the documentation +// here: +// +// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system." +// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004. +// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates +// in the Wild." NSDI. Vol. 7. 2007. +// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for +// host-based network coordinate systems." Networking, IEEE/ACM Transactions +// on 18.1 (2010): 27-40. +type Config struct { + // The dimensionality of the coordinate system. As discussed in [2], more + // dimensions improves the accuracy of the estimates up to a point. Per [2] + // we chose 8 dimensions plus a non-Euclidean height. + Dimensionality uint + + // VivaldiErrorMax is the default error value when a node hasn't yet made + // any observations. It also serves as an upper limit on the error value in + // case observations cause the error value to increase without bound. + VivaldiErrorMax float64 + + // VivaldiCE is a tuning factor that controls the maximum impact an + // observation can have on a node's confidence. See [1] for more details. + VivaldiCE float64 + + // VivaldiCC is a tuning factor that controls the maximum impact an + // observation can have on a node's coordinate. See [1] for more details. + VivaldiCC float64 + + // AdjustmentWindowSize is a tuning factor that determines how many samples + // we retain to calculate the adjustment factor as discussed in [3]. Setting + // this to zero disables this feature. + AdjustmentWindowSize uint + + // HeightMin is the minimum value of the height parameter. 
Since this
+	// always must be positive, it will introduce a small amount of error, so
+	// the chosen value should be relatively small compared to "normal"
+	// coordinates.
+	HeightMin float64
+
+	// LatencyFilterSize is the maximum number of samples that are retained
+	// per node, in order to compute a median. The intent is to ride out blips
+	// but still keep the delay low, since our time to probe any given node is
+	// pretty infrequent. See [2] for more details.
+	LatencyFilterSize uint
+
+	// GravityRho is a tuning factor that sets how much of an effect gravity
+	// has in re-centering coordinates. See [2] for more details.
+	GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
+func DefaultConfig() *Config {
+	return &Config{
+		Dimensionality:       8,
+		VivaldiErrorMax:      1.5,
+		VivaldiCE:            0.25,
+		VivaldiCC:            0.25,
+		AdjustmentWindowSize: 20,
+		HeightMin:            10.0e-6,
+		LatencyFilterSize:    3,
+		GravityRho:           150.0,
+	}
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 0000000..fbe792c
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,203 @@
+package coordinate
+
+import (
+	"math"
+	"math/rand"
+	"time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+	// Vec is the Euclidean portion of the coordinate. This is used along
+	// with the other fields to provide an overall distance estimate. The
+	// units here are seconds.
+	Vec []float64
+
+	// Error reflects the confidence in the given coordinate and is updated
+	// dynamically by the Vivaldi Client. This is dimensionless.
+	Error float64
+
+	// Adjustment is a distance offset computed based on a calculation over
+	// observations from all other nodes over a fixed window and is updated
+	// dynamically by the Vivaldi Client. The units here are seconds.
+	Adjustment float64
+
+	// Height is a distance offset that accounts for non-Euclidean effects
+	// which model the access links from nodes to the core Internet. The access
+	// links are usually set by bandwidth and congestion, and the core links
+	// usually follow distance based on geography.
+	Height float64
+}
+
+const (
+	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
+	secondsToNanoseconds = 1.0e9
+
+	// zeroThreshold is used to decide if two coordinates are on top of each
+	// other.
+	zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is used as a panic value if you try to perform
+// operations with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+	return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+	return &Coordinate{
+		Vec:        make([]float64, config.Dimensionality),
+		Error:      config.VivaldiErrorMax,
+		Adjustment: 0.0,
+		Height:     config.HeightMin,
+	}
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate { + vec := make([]float64, len(c.Vec)) + copy(vec, c.Vec) + return &Coordinate{ + Vec: vec, + Error: c.Error, + Adjustment: c.Adjustment, + Height: c.Height, + } +} + +// componentIsValid returns false if a floating point value is a NaN or an +// infinity. +func componentIsValid(f float64) bool { + return !math.IsInf(f, 0) && !math.IsNaN(f) +} + +// IsValid returns false if any component of a coordinate isn't valid, per the +// componentIsValid() helper above. +func (c *Coordinate) IsValid() bool { + for i := range c.Vec { + if !componentIsValid(c.Vec[i]) { + return false + } + } + + return componentIsValid(c.Error) && + componentIsValid(c.Adjustment) && + componentIsValid(c.Height) +} + +// IsCompatibleWith checks to see if the two coordinates are compatible +// dimensionally. If this returns true then you are guaranteed to not get +// any runtime errors operating on them. +func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool { + return len(c.Vec) == len(other.Vec) +} + +// ApplyForce returns the result of applying the force from the direction of the +// other coordinate. +func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + ret := c.Clone() + unit, mag := unitVectorAt(c.Vec, other.Vec) + ret.Vec = add(ret.Vec, mul(unit, force)) + if mag > zeroThreshold { + ret.Height = (ret.Height+other.Height)*force/mag + ret.Height + ret.Height = math.Max(ret.Height, config.HeightMin) + } + return ret +} + +// DistanceTo returns the distance between this coordinate and the other +// coordinate, including adjustments. +func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + dist := c.rawDistanceTo(other) + adjustedDist := dist + c.Adjustment + other.Adjustment + if adjustedDist > 0.0 { + dist = adjustedDist + } + return time.Duration(dist * secondsToNanoseconds) +} + +// rawDistanceTo returns the Vivaldi distance between this coordinate and the +// other coordinate in seconds, not including adjustments. This assumes the +// dimensions have already been checked to be compatible. +func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { + return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height +} + +// add returns the sum of vec1 and vec2. This assumes the dimensions have +// already been checked to be compatible. +func add(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i := range ret { + ret[i] = vec1[i] + vec2[i] + } + return ret +} + +// diff returns the difference between the vec1 and vec2. This assumes the +// dimensions have already been checked to be compatible. +func diff(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i := range ret { + ret[i] = vec1[i] - vec2[i] + } + return ret +} + +// mul returns vec multiplied by a scalar factor. +func mul(vec []float64, factor float64) []float64 { + ret := make([]float64, len(vec)) + for i := range vec { + ret[i] = vec[i] * factor + } + return ret +} + +// magnitude computes the magnitude of the vec. +func magnitude(vec []float64) float64 { + sum := 0.0 + for i := range vec { + sum += vec[i] * vec[i] + } + return math.Sqrt(sum) +} + +// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two +// positions are the same then a random unit vector is returned. 
We also return
+// the distance between the points for use in the later height calculation.
+func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
+	ret := diff(vec1, vec2)
+
+	// If the coordinates aren't on top of each other we can normalize.
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), mag
+	}
+
+	// Otherwise, just return a random unit vector.
+	for i := range ret {
+		ret[i] = rand.Float64() - 0.5
+	}
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), 0.0
+	}
+
+	// And finally just give up and make a unit vector along the first
+	// dimension. This should be exceedingly rare.
+	ret = make([]float64, len(ret))
+	ret[0] = 1.0
+	return ret, 0.0
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 0000000..6fb033c
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+// GenerateClients returns a slice with nodes number of clients, all with the
+// given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+	clients := make([]*Client, nodes)
+	for i, _ := range clients {
+		client, err := NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+
+		clients[i] = client
+	}
+	return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := time.Duration(j-i) * spacing
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	n := int(math.Sqrt(float64(nodes)))
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			x1, y1 := float64(i%n), float64(i/n)
+			x2, y2 := float64(j%n), float64(j/n)
+			dx, dy := x2-x1, y2-y1
+			dist := math.Sqrt(dx*dx + dy*dy)
+			rtt := time.Duration(dist * float64(spacing))
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	split := nodes / 2
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := lan
+			if (i <= split && j > split) || (i > split && j <= split) {
+				rtt += wan
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius.
The first node is at the "center" of the +// circle because it's equidistant from all the other nodes, but we place it at +// double the radius, so it should show up above all the other nodes in height. +func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + var rtt time.Duration + if i == 0 { + rtt = 2 * radius + } else { + t1 := 2.0 * math.Pi * float64(i) / float64(nodes) + x1, y1 := math.Cos(t1), math.Sin(t1) + t2 := 2.0 * math.Pi * float64(j) / float64(nodes) + x2, y2 := math.Cos(t2), math.Sin(t2) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt = time.Duration(dist * float64(radius)) + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateRandom returns a truth matrix for a set of nodes with normally +// distributed delays, with the given mean and deviation. The RNG is re-seeded +// so you always get the same matrix for a given size. +func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { + rand.Seed(1) + + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() + rtt := time.Duration(rttSeconds * secondsToNanoseconds) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// Simulate runs the given number of cycles using the given list of clients and +// truth matrix. On each cycle, each client will pick a random node and observe +// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for +// each simulation run to get deterministic results (for this algorithm and the +// underlying algorithm which will use random numbers for position vectors when +// starting out with everything at the origin). +func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { + rand.Seed(1) + + nodes := len(clients) + for cycle := 0; cycle < cycles; cycle++ { + for i, _ := range clients { + if j := rand.Intn(nodes); j != i { + c := clients[j].GetCoordinate() + rtt := truth[i][j] + node := fmt.Sprintf("node_%d", j) + clients[i].Update(node, c, rtt) + } + } + } +} + +// Stats is returned from the Evaluate function with a summary of the algorithm +// performance. +type Stats struct { + ErrorMax float64 + ErrorAvg float64 +} + +// Evaluate uses the coordinates of the given clients to calculate estimated +// distances and compares them with the given truth matrix, returning summary +// stats. 
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { + nodes := len(clients) + count := 0 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() + actual := truth[i][j].Seconds() + error := math.Abs(est-actual) / actual + stats.ErrorMax = math.Max(stats.ErrorMax, error) + stats.ErrorAvg += error + count += 1 + } + } + + stats.ErrorAvg /= float64(count) + fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) + return +} diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 0000000..f0e5c79 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md new file mode 100644 index 0000000..d4db7fc --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. 
+ +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). + +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) + if err != nil { + panic(err) + } + + // Setup client side of yamux + session, err := yamux.Client(conn, nil) + if err != nil { + panic(err) + } + + // Open a new stream + stream, err := session.Open() + if err != nil { + panic(err) + } + + // Stream implements net.Conn + stream.Write([]byte("ping")) +} + +func server() { + // Accept a TCP connection + conn, err := listener.Accept() + if err != nil { + panic(err) + } + + // Setup server side of yamux + session, err := yamux.Server(conn, nil) + if err != nil { + panic(err) + } + + // Accept a stream + stream, err := session.Accept() + if err != nil { + panic(err) + } + + // Listen for a message + buf := make([]byte, 4) + stream.Read(buf) +} + +``` + diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go new file mode 100644 index 0000000..be6ebca --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. 
+func (s *Session) LocalAddr() net.Addr {
+	addr, ok := s.conn.(hasAddr)
+	if !ok {
+		return &yamuxAddr{"local"}
+	}
+	return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of the remote end
+// of the underlying connection.
+func (s *Session) RemoteAddr() net.Addr {
+	addr, ok := s.conn.(hasAddr)
+	if !ok {
+		return &yamuxAddr{"remote"}
+	}
+	return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+	return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+	return s.session.RemoteAddr()
+}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 0000000..4f52938
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,157 @@
+package yamux
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+var (
+	// ErrInvalidVersion means we received a frame with an
+	// invalid version
+	ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+	// ErrInvalidMsgType means we received a frame with an
+	// invalid message type
+	ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+	// ErrSessionShutdown is used if there is a shutdown during
+	// an operation
+	ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+	// ErrStreamsExhausted is returned if we have no more
+	// stream ids to issue
+	ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+	// ErrDuplicateStream is used if a duplicate stream is
+	// opened inbound
+	ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+	// ErrRecvWindowExceeded indicates the receive window was exceeded
+	ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+	// ErrTimeout is used when we reach an IO deadline
+	ErrTimeout = fmt.Errorf("i/o deadline reached")
+
+	// ErrStreamClosed is returned when using a closed stream
+	ErrStreamClosed = fmt.Errorf("stream closed")
+
+	// ErrUnexpectedFlag is set when we get an unexpected flag
+	ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
+
+	// ErrRemoteGoAway is used when we get a go away from the other side
+	ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
+
+	// ErrConnectionReset is sent if a stream is reset. This can happen
+	// if the backlog is exceeded, or if there was a remote GoAway.
+	ErrConnectionReset = fmt.Errorf("connection reset")
+
+	// ErrConnectionWriteTimeout indicates that we hit the "safety valve"
+	// timeout writing to the underlying stream connection.
+	ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
+
+	// ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close
+	ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
+)
+
+const (
+	// protoVersion is the only version we support
+	protoVersion uint8 = 0
+)
+
+const (
+	// Data is used for data frames. They are followed
+	// by length bytes worth of payload.
+	typeData uint8 = iota
+
+	// WindowUpdate is used to change the window of
+	// a given stream. The length indicates the delta
+	// update to the window.
+	typeWindowUpdate
+
+	// Ping is sent as a keep-alive or to measure
+	// the RTT. The StreamID and Length value are echoed
+	// back in the response.
+	typePing
+
+	// GoAway is sent to terminate a session. The StreamID
+	// should be 0 and the length is an error code.
+	typeGoAway
+)
+
+const (
+	// SYN is sent to signal a new stream. May
+	// be sent with a data payload
+	flagSYN uint16 = 1 << iota
+
+	// ACK is sent to acknowledge a new stream. May
+	// be sent with a data payload
+	flagACK
+
+	// FIN is sent to half-close the given stream.
+	// May be sent with a data payload.
+	flagFIN
+
+	// RST is used to hard close a given stream.
+	flagRST
+)
+
+const (
+	// initialStreamWindow is the initial stream window size
+	initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+	// goAwayNormal is sent on a normal termination
+	goAwayNormal uint32 = iota
+
+	// goAwayProtoErr sent on a protocol error
+	goAwayProtoErr
+
+	// goAwayInternalErr sent on an internal error
+	goAwayInternalErr
+)
+
+const (
+	sizeOfVersion  = 1
+	sizeOfType     = 1
+	sizeOfFlags    = 2
+	sizeOfStreamID = 4
+	sizeOfLength   = 4
+	headerSize     = sizeOfVersion + sizeOfType + sizeOfFlags +
+		sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+	return h[0]
+}
+
+func (h header) MsgType() uint8 {
+	return h[1]
+}
+
+func (h header) Flags() uint16 {
+	return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+	return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+	return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+	return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+		h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+	h[0] = protoVersion
+	h[1] = msgType
+	binary.BigEndian.PutUint16(h[2:4], flags)
+	binary.BigEndian.PutUint32(h[4:8], streamID)
+	binary.BigEndian.PutUint32(h[8:12], length)
+}
diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod
new file mode 100644
index 0000000..672a0e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/yamux
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 0000000..18a078c
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,98 @@
+package yamux
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+	// AcceptBacklog is used to limit how many streams may be
+	// waiting for an accept.
+	AcceptBacklog int
+
+	// EnableKeepAlive is used to send periodic keep-alive
+	// messages using a ping.
+	EnableKeepAlive bool
+
+	// KeepAliveInterval is how often to perform the keep alive
+	KeepAliveInterval time.Duration
+
+	// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+	// which we will suspect a problem with the underlying connection and
+	// close it. This is only applied to writes, where there's generally
+	// an expectation that things will move along quickly.
+	ConnectionWriteTimeout time.Duration
+
+	// MaxStreamWindowSize is used to control the maximum
+	// window size that we allow for a stream.
+	MaxStreamWindowSize uint32
+
+	// LogOutput is used to control the log destination. Either Logger or
+	// LogOutput can be set, not both.
+	LogOutput io.Writer
+
+	// Logger is used to pass in the logger to be used. Either Logger or
+	// LogOutput can be set, not both.
+	Logger *log.Logger
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+	return &Config{
+		AcceptBacklog:          256,
+		EnableKeepAlive:        true,
+		KeepAliveInterval:      30 * time.Second,
+		ConnectionWriteTimeout: 10 * time.Second,
+		MaxStreamWindowSize:    initialStreamWindow,
+		LogOutput:              os.Stderr,
+	}
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+	if config.AcceptBacklog <= 0 {
+		return fmt.Errorf("backlog must be positive")
+	}
+	if config.KeepAliveInterval == 0 {
+		return fmt.Errorf("keep-alive interval must be positive")
+	}
+	if config.MaxStreamWindowSize < initialStreamWindow {
+		return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+	}
+	if config.LogOutput != nil && config.Logger != nil {
+		return fmt.Errorf("both Logger and LogOutput may not be set, select one")
+	} else if config.LogOutput == nil && config.Logger == nil {
+		return fmt.Errorf("one of Logger or LogOutput must be set, select one")
+	}
+	return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+	if config == nil {
+		config = DefaultConfig()
+	}
+	if err := VerifyConfig(config); err != nil {
+		return nil, err
+	}
+	return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+	if config == nil {
+		config = DefaultConfig()
+	}
+
+	if err := VerifyConfig(config); err != nil {
+		return nil, err
+	}
+	return newSession(config, conn, true), nil
+}
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 0000000..a80ddec
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,653 @@
+package yamux
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+	// remoteGoAway indicates the remote side does
+	// not want further connections. Must be first for alignment.
+	remoteGoAway int32
+
+	// localGoAway indicates that we should stop
+	// accepting further connections. Must be first for alignment.
+	localGoAway int32
+
+	// nextStreamID is the next stream we should
+	// send. This depends on whether we are a client or a server.
+	nextStreamID uint32
+
+	// config holds our configuration
+	config *Config
+
+	// logger is used for our logs
+	logger *log.Logger
+
+	// conn is the underlying connection
+	conn io.ReadWriteCloser
+
+	// bufRead is a buffered reader
+	bufRead *bufio.Reader
+
+	// pings is used to track inflight pings
+	pings    map[uint32]chan struct{}
+	pingID   uint32
+	pingLock sync.Mutex
+
+	// streams maps a stream id to a stream, and inflight has an entry
+	// for any outgoing stream that has not yet been established. Both are
+	// protected by streamLock.
+	streams    map[uint32]*Stream
+	inflight   map[uint32]struct{}
+	streamLock sync.Mutex
+
+	// synCh acts like a semaphore. It is sized to the AcceptBacklog which
+	// is assumed to be symmetric between the client and server.
This allows + // the client to avoid exceeding the backlog and instead blocks the open. + synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. + sendCh chan sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + Body io.Reader + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + logger := config.Logger + if logger == nil { + logger = log.New(config.LogOutput, "", log.LstdFlags) + } + + s := &Session{ + config: config, + logger: logger, + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan sendReady, 64), + recvDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. +func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. 
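+// A minimal server-side sketch (illustrative only; conn is an assumed
+// io.ReadWriteCloser and handle is a hypothetical caller-provided function):
+//
+//	session, _ := Server(conn, nil)
+//	for {
+//		stream, err := session.Accept()
+//		if err != nil {
+//			break
+//		}
+//		go handle(stream)
+//	}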
+func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. +// Attempts to send a GoAway before closing the connection. +func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + close(s.shutdownCh) + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. +func (s *Session) GoAway() error { + return s.waitForSend(s.goAway(goAwayNormal), nil) +} + +// goAway is used to send a goAway message +func (s *Session) goAway(reason uint32) header { + atomic.SwapInt32(&s.localGoAway, 1) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeGoAway, 0, 0, reason) + return hdr +} + +// Ping is used to measure the RTT response time +func (s *Session) Ping() (time.Duration, error) { + // Get a channel for the ping + ch := make(chan struct{}) + + // Get a new ping id, mark as pending + s.pingLock.Lock() + id := s.pingID + s.pingID++ + s.pings[id] = ch + s.pingLock.Unlock() + + // Send the ping request + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagSYN, 0, id) + if err := s.waitForSend(hdr, nil); err != nil { + return 0, err + } + + // Wait for a response + start := time.Now() + select { + case <-ch: + case <-time.After(s.config.ConnectionWriteTimeout): + s.pingLock.Lock() + delete(s.pings, id) // Ignore it if a response comes later. + s.pingLock.Unlock() + return 0, ErrTimeout + case <-s.shutdownCh: + return 0, ErrSessionShutdown + } + + // Compute the RTT + return time.Now().Sub(start), nil +} + +// keepalive is a long running goroutine that periodically does +// a ping to keep the connection alive. +func (s *Session) keepalive() { + for { + select { + case <-time.After(s.config.KeepAliveInterval): + _, err := s.Ping() + if err != nil { + if err != ErrSessionShutdown { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + } + return + } + case <-s.shutdownCh: + return + } + } +} + +// waitForSendErr waits to send a header, checking for a potential shutdown +func (s *Session) waitForSend(hdr header, body io.Reader) error { + errCh := make(chan error, 1) + return s.waitForSendErr(hdr, body, errCh) +} + +// waitForSendErr waits to send a header with optional data, checking for a +// potential shutdown. Since there's the expectation that sends can happen +// in a timely manner, we enforce the connection write timeout here. 
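+// The timer used below is taken from timerPool (see util.go); it is stopped
+// and drained in the deferred function before being returned to the pool so
+// that a stale expiry cannot fire on a later reuse.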
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := 
hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil + } + + // Read the new data + if err := stream.readData(hdr, flags, s.bufRead); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil +} + +// handlePing is invokde for a typePing frame +func (s *Session) handlePing(hdr header) error { + flags := hdr.Flags() + pingID := hdr.Length() + + // Check if this is a query, respond back in a separate context so we + // don't interfere with the receiving thread blocking for the write. + if flags&flagSYN == flagSYN { + go func() { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, pingID) + if err := s.sendNoWait(hdr); err != nil { + s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) + } + }() + return nil + } + + // Handle a response + s.pingLock.Lock() + ch := s.pings[pingID] + if ch != nil { + delete(s.pings, pingID) + close(ch) + } + s.pingLock.Unlock() + return nil +} + +// handleGoAway is invokde for a typeGoAway frame +func (s *Session) handleGoAway(hdr header) error { + code := hdr.Length() + switch code { + case goAwayNormal: + atomic.SwapInt32(&s.remoteGoAway, 1) + case goAwayProtoErr: + s.logger.Printf("[ERR] yamux: received protocol error go away") + return fmt.Errorf("yamux protocol error") + case goAwayInternalErr: + s.logger.Printf("[ERR] yamux: received internal error go away") + return fmt.Errorf("remote yamux internal error") + default: + s.logger.Printf("[ERR] yamux: received unexpected go away") + return fmt.Errorf("unexpected go away received") + } + return nil +} + +// incomingStream is used to create a new incoming stream +func (s *Session) incomingStream(id uint32) error { + // Reject immediately if we are doing a go away + if atomic.LoadInt32(&s.localGoAway) == 1 { + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } + + // Allocate a new stream + stream := newStream(s, id, streamSYNReceived) + + s.streamLock.Lock() + defer s.streamLock.Unlock() + + // Check if stream already exists + if _, ok := s.streams[id]; ok { + s.logger.Printf("[ERR] yamux: duplicate stream declared") + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return ErrDuplicateStream + } + + // Register the stream + s.streams[id] = stream + + // Check if we've exceeded the backlog + select { + case s.acceptCh <- 
stream: + return nil + default: + // Backlog exceeded! RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(stream.sendHdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. If there was an in-flight SYN and the stream +// was not yet established, then this will give the credit back. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: SYN tracking out of sync") + } + } + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. +func (s *Session) establishStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + } else { + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") + } + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") + } + s.streamLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md new file mode 100644 index 0000000..183d797 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/spec.md @@ -0,0 +1,140 @@ +# Specification + +We use this document to detail the internal specification of Yamux. +This is used both as a guide for implementing Yamux, but also for +alternative interoperable libraries to be built. + +# Framing + +Yamux uses a streaming connection underneath, but imposes a message +framing so that it can be shared between many logical streams. Each +frame contains a header like: + +* Version (8 bits) +* Type (8 bits) +* Flags (16 bits) +* StreamID (32 bits) +* Length (32 bits) + +This means that each header has a 12 byte overhead. +All fields are encoded in network order (big endian). +Each field is described below: + +## Version Field + +The version field is used for future backward compatibility. At the +current time, the field is always set to 0, to indicate the initial +version. + +## Type Field + +The type field is used to switch the frame message type. The following +message types are supported: + +* 0x0 Data - Used to transmit data. May transmit zero length payloads + depending on the flags. + +* 0x1 Window Update - Used to updated the senders receive window size. + This is used to implement per-session flow control. + +* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat + and do keep-alives over TCP. + +* 0x3 Go Away - Used to close a session. + +## Flag Field + +The flags field is used to provide additional information related +to the message type. The following flags are supported: + +* 0x1 SYN - Signals the start of a new stream. May be sent with a data or + window update message. Also sent with a ping to indicate outbound. + +* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data + or window update message. Also sent with a ping to indicate response. + +* 0x4 FIN - Performs a half-close of a stream. May be sent with a data + message or window update. + +* 0x8 RST - Reset a stream immediately. May be sent with a data or + window update message. + +## StreamID Field + +The StreamID field is used to identify the logical stream the frame +is addressing. 
The client side should use odd ID's, and the server even. +This prevents any collisions. Additionally, the 0 ID is reserved to represent +the session. + +Both Ping and Go Away messages should always use the 0 StreamID. + +## Length Field + +The meaning of the length field depends on the message type: + +* Data - provides the length of bytes following the header +* Window update - provides a delta update to the window size +* Ping - Contains an opaque value, echoed back +* Go Away - Contains an error code + +# Message Flow + +There is no explicit connection setup, as Yamux relies on an underlying +transport to be provided. However, there is a distinction between client +and server side of the connection. + +## Opening a stream + +To open a stream, an initial data or window update frame is sent +with a new StreamID. The SYN flag should be set to signal a new stream. + +The receiver must then reply with either a data or window update frame +with the StreamID along with the ACK flag to accept the stream or with +the RST flag to reject the stream. + +Because we are relying on the reliable stream underneath, a connection +can begin sending data once the SYN flag is sent. The corresponding +ACK does not need to be received. This is particularly well suited +for an RPC system where a client wants to open a stream and immediately +fire a request without waiting for the RTT of the ACK. + +This does introduce the possibility of a connection being rejected +after data has been sent already. This is a slight semantic difference +from TCP, where the conection cannot be refused after it is opened. +Clients should be prepared to handle this by checking for an error +that indicates a RST was received. + +## Closing a stream + +To close a stream, either side sends a data or window update frame +along with the FIN flag. This does a half-close indicating the sender +will send no further data. + +Once both sides have closed the connection, the stream is closed. + +Alternatively, if an error occurs, the RST flag can be used to +hard close a stream immediately. + +## Flow Control + +When Yamux is initially starts each stream with a 256KB window size. +There is no window size for the session. + +To prevent the streams from stalling, window update frames should be +sent regularly. Yamux can be configured to provide a larger limit for +windows sizes. Both sides assume the initial 256KB window, but can +immediately send a window update as part of the SYN/ACK indicating a +larger window. + +Both sides should track the number of bytes sent in Data frames +only, as only they are tracked as part of the window size. + +## Session termination + +When a session is being terminated, the Go Away message should +be sent. The Length should be set to one of the following to +provide an error code: + +* 0x0 Normal termination +* 0x1 Protocol error +* 0x2 Internal error diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go new file mode 100644 index 0000000..aa23919 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -0,0 +1,470 @@ +package yamux + +import ( + "bytes" + "io" + "sync" + "sync/atomic" + "time" +) + +type streamState int + +const ( + streamInit streamState = iota + streamSYNSent + streamSYNReceived + streamEstablished + streamLocalClose + streamRemoteClose + streamClosed + streamReset +) + +// Stream is used to represent a logical stream +// within a session. 
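+// A minimal client-side sketch (illustrative only; conn is an assumed
+// pre-established io.ReadWriteCloser such as a net.Conn):
+//
+//	session, _ := Client(conn, nil)
+//	stream, _ := session.OpenStream()
+//	stream.Write([]byte("ping"))
+//	stream.Close()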
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. 
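+// Write (above) loops until the whole buffer has been sent; write itself
+// sends at most min(sendWindow, len(b)) bytes per call and blocks while the
+// send window is zero until a window update arrives or the write deadline
+// expires.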
+func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body io.Reader +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = bytes.NewReader(b[:max]) + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() + return nil + } + + // Update our window + s.recvWindow += delta + s.recvLock.Unlock() + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if 
closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + s.session.closeStream(s.id) + } + }() + + s.stateLock.Lock() + defer s.stateLock.Unlock() + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + return ErrRecvWindowExceeded + } + + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + if _, err := io.Copy(s.recvBuf, conn); err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + s.recvWindow -= length + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for future Read calls. 
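+// For example (illustrative only; buf is an assumed caller-provided buffer):
+//
+//	stream.SetReadDeadline(time.Now().Add(5 * time.Second))
+//	if _, err := stream.Read(buf); err == ErrTimeout {
+//		// the deadline elapsed before any data arrived
+//	}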
+func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline.Store(t) + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline.Store(t) + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. +func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 0000000..8a73e92 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,43 @@ +package yamux + +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/hpcloud/tail/LICENSE.txt new file mode 100644 index 0000000..818d802 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/LICENSE.txt @@ -0,0 +1,21 @@ +# The MIT License (MIT) + +# © Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/hpcloud/tail/util/util.go new file mode 100644 index 0000000..54151fe --- /dev/null +++ b/vendor/github.com/hpcloud/tail/util/util.go @@ -0,0 +1,48 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package util + +import ( + "fmt" + "log" + "os" + "runtime/debug" +) + +type Logger struct { + *log.Logger +} + +var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} + +// fatal is like panic except it displays only the current goroutine's stack. 
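+// Typical call site (illustrative; mirrors how the watch package uses it):
+//
+//	util.Fatal("Failed to stat file %v: %v", filename, err)
+//
+// Unlike panic, this terminates the whole process via os.Exit(1).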
+func Fatal(format string, v ...interface{}) { + // https://github.com/hpcloud/log/blob/master/log.go#L45 + LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) + os.Exit(1) +} + +// partitionString partitions the string into chunks of given size, +// with the last chunk of variable size. +func PartitionString(s string, chunkSize int) []string { + if chunkSize <= 0 { + panic("invalid chunkSize") + } + length := len(s) + chunks := 1 + length/chunkSize + start := 0 + end := chunkSize + parts := make([]string, 0, chunks) + for { + if end > length { + end = length + } + parts = append(parts, s[start:end]) + if end == length { + break + } + start, end = end, end+chunkSize + } + return parts +} diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/hpcloud/tail/watch/filechanges.go new file mode 100644 index 0000000..f80aead --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/filechanges.go @@ -0,0 +1,36 @@ +package watch + +type FileChanges struct { + Modified chan bool // Channel to get notified of modifications + Truncated chan bool // Channel to get notified of truncations + Deleted chan bool // Channel to get notified of deletions/renames +} + +func NewFileChanges() *FileChanges { + return &FileChanges{ + make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)} +} + +func (fc *FileChanges) NotifyModified() { + sendOnlyIfEmpty(fc.Modified) +} + +func (fc *FileChanges) NotifyTruncated() { + sendOnlyIfEmpty(fc.Truncated) +} + +func (fc *FileChanges) NotifyDeleted() { + sendOnlyIfEmpty(fc.Deleted) +} + +// sendOnlyIfEmpty sends on a bool channel only if the channel has no +// backlog to be read by other goroutines. This concurrency pattern +// can be used to notify other goroutines if and only if they are +// looking for it (i.e., subsequent notifications can be compressed +// into one). +func sendOnlyIfEmpty(ch chan bool) { + select { + case ch <- true: + default: + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/hpcloud/tail/watch/inotify.go new file mode 100644 index 0000000..2bbfe0b --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify.go @@ -0,0 +1,135 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify/fsnotify.v1" + "gopkg.in/tomb.v1" +) + +// InotifyFileWatcher uses inotify to monitor file changes. +type InotifyFileWatcher struct { + Filename string + Size int64 +} + +func NewInotifyFileWatcher(filename string) *InotifyFileWatcher { + fw := &InotifyFileWatcher{filepath.Clean(filename), 0} + return fw +} + +func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + err := WatchCreate(fw.Filename) + if err != nil { + return err + } + defer RemoveWatchCreate(fw.Filename) + + // Do a real check now as the file might have been created before + // calling `WatchFlags` above. + if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) { + // file exists, or stat returned an error. 
+ return err + } + + events := Events(fw.Filename) + + for { + select { + case evt, ok := <-events: + if !ok { + return fmt.Errorf("inotify watcher has been closed") + } + evtName, err := filepath.Abs(evt.Name) + if err != nil { + return err + } + fwFilename, err := filepath.Abs(fw.Filename) + if err != nil { + return err + } + if evtName == fwFilename { + return nil + } + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + err := Watch(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + fw.Size = pos + + go func() { + + events := Events(fw.Filename) + + for { + prevSize := fw.Size + + var evt fsnotify.Event + var ok bool + + select { + case evt, ok = <-events: + if !ok { + RemoveWatch(fw.Filename) + return + } + case <-t.Dying(): + RemoveWatch(fw.Filename) + return + } + + switch { + case evt.Op&fsnotify.Remove == fsnotify.Remove: + fallthrough + + case evt.Op&fsnotify.Rename == fsnotify.Rename: + RemoveWatch(fw.Filename) + changes.NotifyDeleted() + return + + //With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod) + case evt.Op&fsnotify.Chmod == fsnotify.Chmod: + fallthrough + + case evt.Op&fsnotify.Write == fsnotify.Write: + fi, err := os.Stat(fw.Filename) + if err != nil { + if os.IsNotExist(err) { + RemoveWatch(fw.Filename) + changes.NotifyDeleted() + return + } + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + fw.Size = fi.Size() + + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + } else { + changes.NotifyModified() + } + prevSize = fw.Size + } + } + }() + + return changes, nil +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go new file mode 100644 index 0000000..739b3c2 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go @@ -0,0 +1,248 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
+ +package watch + +import ( + "log" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify/fsnotify.v1" +) + +type InotifyTracker struct { + mux sync.Mutex + watcher *fsnotify.Watcher + chans map[string]chan fsnotify.Event + done map[string]chan bool + watchNums map[string]int + watch chan *watchInfo + remove chan *watchInfo + error chan error +} + +type watchInfo struct { + op fsnotify.Op + fname string +} + +func (this *watchInfo) isCreate() bool { + return this.op == fsnotify.Create +} + +var ( + // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used + shared *InotifyTracker + + // these are used to ensure the shared InotifyTracker is run exactly once + once = sync.Once{} + goRun = func() { + shared = &InotifyTracker{ + mux: sync.Mutex{}, + chans: make(map[string]chan fsnotify.Event), + done: make(map[string]chan bool), + watchNums: make(map[string]int), + watch: make(chan *watchInfo), + remove: make(chan *watchInfo), + error: make(chan error), + } + go shared.run() + } + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +// Watch signals the run goroutine to begin watching the input filename +func Watch(fname string) error { + return watch(&watchInfo{ + fname: fname, + }) +} + +// Watch create signals the run goroutine to begin watching the input filename +// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate +func WatchCreate(fname string) error { + return watch(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func watch(winfo *watchInfo) error { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.watch <- winfo + return <-shared.error +} + +// RemoveWatch signals the run goroutine to remove the watch for the input filename +func RemoveWatch(fname string) error { + return remove(&watchInfo{ + fname: fname, + }) +} + +// RemoveWatch create signals the run goroutine to remove the watch for the input filename +func RemoveWatchCreate(fname string) error { + return remove(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func remove(winfo *watchInfo) error { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.mux.Lock() + done := shared.done[winfo.fname] + if done != nil { + delete(shared.done, winfo.fname) + close(done) + } + shared.mux.Unlock() + + shared.remove <- winfo + return <-shared.error +} + +// Events returns a channel to which FileEvents corresponding to the input filename +// will be sent. This channel will be closed when removeWatch is called on this +// filename. +func Events(fname string) <-chan fsnotify.Event { + shared.mux.Lock() + defer shared.mux.Unlock() + + return shared.chans[fname] +} + +// Cleanup removes the watch for the input filename if necessary. +func Cleanup(fname string) error { + return RemoveWatch(fname) +} + +// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating +// a new Watcher if the previous Watcher was closed. 
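+// addWatch reference-counts watches per path via watchNums: the underlying
+// fsnotify watch is only added when the count for a path goes from zero to
+// one, and removeWatch removes it again once the count drops back to zero.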
+func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { + shared.mux.Lock() + defer shared.mux.Unlock() + + if shared.chans[winfo.fname] == nil { + shared.chans[winfo.fname] = make(chan fsnotify.Event) + } + if shared.done[winfo.fname] == nil { + shared.done[winfo.fname] = make(chan bool) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + + var err error + // already in inotify watch + if shared.watchNums[fname] == 0 { + err = shared.watcher.Add(fname) + } + if err == nil { + shared.watchNums[fname]++ + } + return err +} + +// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the +// corresponding events channel. +func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error { + shared.mux.Lock() + + ch := shared.chans[winfo.fname] + if ch != nil { + delete(shared.chans, winfo.fname) + close(ch) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + shared.watchNums[fname]-- + watchNum := shared.watchNums[fname] + if watchNum == 0 { + delete(shared.watchNums, fname) + } + shared.mux.Unlock() + + var err error + // If we were the last ones to watch this file, unsubscribe from inotify. + // This needs to happen after releasing the lock because fsnotify waits + // synchronously for the kernel to acknowledge the removal of the watch + // for this file, which causes us to deadlock if we still held the lock. + if watchNum == 0 { + err = shared.watcher.Remove(fname) + } + + return err +} + +// sendEvent sends the input event to the appropriate Tail. +func (shared *InotifyTracker) sendEvent(event fsnotify.Event) { + name := filepath.Clean(event.Name) + + shared.mux.Lock() + ch := shared.chans[name] + done := shared.done[name] + shared.mux.Unlock() + + if ch != nil && done != nil { + select { + case ch <- event: + case <-done: + } + } +} + +// run starts the goroutine in which the shared struct reads events from its +// Watcher's Event channel and sends the events to the appropriate Tail. +func (shared *InotifyTracker) run() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + util.Fatal("failed to create Watcher") + } + shared.watcher = watcher + + for { + select { + case winfo := <-shared.watch: + shared.error <- shared.addWatch(winfo) + + case winfo := <-shared.remove: + shared.error <- shared.removeWatch(winfo) + + case event, open := <-shared.watcher.Events: + if !open { + return + } + shared.sendEvent(event) + + case err, open := <-shared.watcher.Errors: + if !open { + return + } else if err != nil { + sysErr, ok := err.(*os.SyscallError) + if !ok || sysErr.Err != syscall.EINTR { + logger.Printf("Error in Watcher Error channel: %s", err) + } + } + } + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/hpcloud/tail/watch/polling.go new file mode 100644 index 0000000..49491f2 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/polling.go @@ -0,0 +1,118 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "os" + "runtime" + "time" + + "github.com/hpcloud/tail/util" + "gopkg.in/tomb.v1" +) + +// PollingFileWatcher polls the file for changes. 
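+// A minimal usage sketch (illustrative only; the path is hypothetical):
+//
+//	var t tomb.Tomb
+//	fw := NewPollingFileWatcher("/var/log/app.log")
+//	changes, err := fw.ChangeEvents(&t, 0)
+//	if err == nil {
+//		<-changes.Modified // fires on the next detected change
+//	}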
+type PollingFileWatcher struct { + Filename string + Size int64 +} + +func NewPollingFileWatcher(filename string) *PollingFileWatcher { + fw := &PollingFileWatcher{filename, 0} + return fw +} + +var POLL_DURATION time.Duration + +func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + for { + if _, err := os.Stat(fw.Filename); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + select { + case <-time.After(POLL_DURATION): + continue + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + origFi, err := os.Stat(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + var prevModTime time.Time + + // XXX: use tomb.Tomb to cleanly manage these goroutines. replace + // the fatal (below) with tomb's Kill. + + fw.Size = pos + + go func() { + prevSize := fw.Size + for { + select { + case <-t.Dying(): + return + default: + } + + time.Sleep(POLL_DURATION) + fi, err := os.Stat(fw.Filename) + if err != nil { + // Windows cannot delete a file if a handle is still open (tail keeps one open) + // so it gives access denied to anything trying to read it until all handles are released. + if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) { + // File does not exist (has been deleted). + changes.NotifyDeleted() + return + } + + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + + // File got moved/renamed? + if !os.SameFile(origFi, fi) { + changes.NotifyDeleted() + return + } + + // File got truncated? + fw.Size = fi.Size() + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + prevSize = fw.Size + continue + } + // File got bigger? + if prevSize > 0 && prevSize < fw.Size { + changes.NotifyModified() + prevSize = fw.Size + continue + } + prevSize = fw.Size + + // File was appended to (changed)? + modTime := fi.ModTime() + if modTime != prevModTime { + prevModTime = modTime + changes.NotifyModified() + } + } + }() + + return changes, nil +} + +func init() { + POLL_DURATION = 250 * time.Millisecond +} diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/hpcloud/tail/watch/watch.go new file mode 100644 index 0000000..2e1783e --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/watch.go @@ -0,0 +1,20 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import "gopkg.in/tomb.v1" + +// FileWatcher monitors file-level events. +type FileWatcher interface { + // BlockUntilExists blocks until the file comes into existence. + BlockUntilExists(*tomb.Tomb) error + + // ChangeEvents reports on changes to a file, be it modification, + // deletion, renames or truncations. Returned FileChanges group of + // channels will be closed, thus become unusable, after a deletion + // or truncation event. + // In order to properly report truncations, ChangeEvents requires + // the caller to pass their current offset in the file. 
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error) +} diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml new file mode 100644 index 0000000..d7b9589 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.7 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 0000000..2298515 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 0000000..bcb8c8d --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 0000000..db6a6aa --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 0000000..1404352 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,548 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. 
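+// For example (illustrative only):
+//
+//	input := map[string]interface{}{"a": []int{1, 2, 3}}
+//	dup, err := Copy(input)
+//	if err != nil {
+//		// handle the error
+//	}
+//	// or, when a copy failure should be fatal:
+//	dup = Must(Copy(input))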
+func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. +func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. 
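+ // For example, ifaceKey(2, 3) packs two pointer indirections seen at
+ // walk depth three into the single key 2<<32 | 3.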
+ ifaceTypes map[uint64]reflect.Type + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Array: + fallthrough + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + w.replacePointerMaybe() + + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. + if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. 
If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := Copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. 
+ if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. + // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. 
+ if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 0000000..f9c841a --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 0000000..d70706d --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. 
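+
+A minimal usage sketch built on the two calls described above (the error
+handling and the `~/projects` path are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+func main() {
+	home, err := homedir.Dir() // OS-specific detection; cached by default
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	expanded, err := homedir.Expand("~/projects") // "~" is replaced with the home dir
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println(home, expanded)
+}
+```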
diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod new file mode 100644 index 0000000..7efa09a --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-homedir diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000..2537853 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
+ if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml new file mode 100644 index 0000000..928d000 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.8 + - 1.x + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 0000000..a3866a2 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 0000000..26781bb --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,52 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. 
+ +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually. This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). + +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Why?! + +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/go.mod b/vendor/github.com/mitchellh/go-testing-interface/go.mod new file mode 100644 index 0000000..062796d --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-testing-interface diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 0000000..204afb4 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. 
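+//
+// A minimal runtime sketch (PopulateFixtures is a hypothetical helper written
+// against the T interface in this package):
+//
+//	rt := new(RuntimeT)
+//	PopulateFixtures(rt) // may call rt.Error or rt.Fatal
+//	if rt.Failed() {
+//		// react to failures yourself; Error only marks rt as failed,
+//		// while Fatal and FailNow panic.
+//	}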
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 0000000..31b42ca --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) 
+ t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE new file mode 100644 index 0000000..a3866a2 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md new file mode 100644 index 0000000..7d0de5b --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -0,0 +1,61 @@ +# hashstructure + +hashstructure is a Go library for creating a unique hash value +for arbitrary values in Go. + +This can be used to key values in a hash (for use in a map, set, etc.) +that are complex. The most common use case is comparing two values without +sending data across the network, caching values locally (de-dup), and so on. + +## Features + + * Hash any arbitrary Go value, including complex types. + + * Tag a struct field to ignore it and not affect the hash value. + + * Tag a slice type struct field to treat it as a set where ordering + doesn't affect the hash code but the field itself is still taken into + account to create the hash value. + + * Optionally specify a custom hash function to optimize for speed, collision + avoidance for your data set, etc. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/hashstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). 
+ +A quick code example is shown below: + + + type ComplexStruct struct { + Name string + Age uint + Metadata map[string]interface{} + } + + v := ComplexStruct{ + Name: "mitchellh", + Age: 64, + Metadata: map[string]interface{}{ + "car": true, + "location": "California", + "siblings": []string{"Bob", "John"}, + }, + } + + hash, err := hashstructure.Hash(v, nil) + if err != nil { + panic(err) + } + + fmt.Printf("%d", hash) + // Output: + // 2307517237273902113 diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go new file mode 100644 index 0000000..663fd72 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -0,0 +1,323 @@ +package hashstructure + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/fnv" + "reflect" +) + +// HashOptions are options that are available for hashing. +type HashOptions struct { + // Hasher is the hash function to use. If this isn't set, it will + // default to FNV. + Hasher hash.Hash64 + + // TagName is the struct tag to look at when hashing the structure. + // By default this is "hash". + TagName string +} + +// Hash returns the hash value of an arbitrary value. +// +// If opts is nil, then default options will be used. See HashOptions +// for the default values. +// +// Notes on the value: +// +// * Unexported fields on structs are ignored and do not affect the +// hash value. +// +// * Adding an exported field to a struct with the zero value will change +// the hash value. +// +// For structs, the hashing can be controlled using tags. For example: +// +// struct { +// Name string +// UUID string `hash:"ignore"` +// } +// +// The available tag values are: +// +// * "ignore" - The field will be ignored and not affect the hash code. +// +// * "set" - The field will be treated as a set, where ordering doesn't +// affect the hash code. This only works for slices. +// +func Hash(v interface{}, opts *HashOptions) (uint64, error) { + // Create default options + if opts == nil { + opts = &HashOptions{} + } + if opts.Hasher == nil { + opts.Hasher = fnv.New64() + } + if opts.TagName == "" { + opts.TagName = "hash" + } + + // Reset the hash + opts.Hasher.Reset() + + // Create our walker and walk the structure + w := &walker{ + h: opts.Hasher, + tag: opts.TagName, + } + return w.visit(reflect.ValueOf(v), nil) +} + +type walker struct { + h hash.Hash64 + tag string +} + +type visitOpts struct { + // Flags are a bitmask of flags to affect behavior of this visit + Flags visitFlag + + // Information about the struct containing this field + Struct interface{} + StructField string +} + +func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { + // Loop since these can be wrapped in multiple layers of pointers + // and interfaces. + for { + // If we have an interface, dereference it. We have to do this up + // here because it might be a nil in there and the check below must + // catch that. + if v.Kind() == reflect.Interface { + v = v.Elem() + continue + } + + if v.Kind() == reflect.Ptr { + v = reflect.Indirect(v) + continue + } + + break + } + + // If it is nil, treat it like a zero. + if !v.IsValid() { + var tmp int8 + v = reflect.ValueOf(tmp) + } + + // Binary writing can use raw ints, we have to convert to + // a sized-int, we'll choose the largest... 
+ switch v.Kind() { + case reflect.Int: + v = reflect.ValueOf(int64(v.Int())) + case reflect.Uint: + v = reflect.ValueOf(uint64(v.Uint())) + case reflect.Bool: + var tmp int8 + if v.Bool() { + tmp = 1 + } + v = reflect.ValueOf(tmp) + } + + k := v.Kind() + + // We can shortcut numeric values by directly binary writing them + if k >= reflect.Int && k <= reflect.Complex64 { + // A direct hash calculation + w.h.Reset() + err := binary.Write(w.h, binary.LittleEndian, v.Interface()) + return w.h.Sum64(), err + } + + switch k { + case reflect.Array: + var h uint64 + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + h = hashUpdateOrdered(w.h, h, current) + } + + return h, nil + + case reflect.Map: + var includeMap IncludableMap + if opts != nil && opts.Struct != nil { + if v, ok := opts.Struct.(IncludableMap); ok { + includeMap = v + } + } + + // Build the hash for the map. We do this by XOR-ing all the key + // and value hashes. This makes it deterministic despite ordering. + var h uint64 + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + if includeMap != nil { + incl, err := includeMap.HashIncludeMap( + opts.StructField, k.Interface(), v.Interface()) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + kh, err := w.visit(k, nil) + if err != nil { + return 0, err + } + vh, err := w.visit(v, nil) + if err != nil { + return 0, err + } + + h = hashUpdateUnordered(h, kh) + h = hashUpdateUnordered(h, vh) + } + + return h, nil + + case reflect.Struct: + var include Includable + parent := v.Interface() + if impl, ok := parent.(Includable); ok { + include = impl + } + + t := v.Type() + h, err := w.visit(reflect.ValueOf(t.Name()), nil) + if err != nil { + return 0, err + } + + l := v.NumField() + for i := 0; i < l; i++ { + if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag + fieldType := t.Field(i) + if fieldType.PkgPath != "" { + // Unexported + continue + } + + tag := fieldType.Tag.Get(w.tag) + if tag == "ignore" { + // Ignore this field + continue + } + + // Check if we implement includable and check it + if include != nil { + incl, err := include.HashInclude(fieldType.Name, v) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + switch tag { + case "set": + f |= visitFlagSet + } + + kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) + if err != nil { + return 0, err + } + + vh, err := w.visit(v, &visitOpts{ + Flags: f, + Struct: parent, + StructField: fieldType.Name, + }) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + } + + return h, nil + + case reflect.Slice: + // We have two behaviors here. If it isn't a set, then we just + // visit all the elements. If it is a set, then we do a deterministic + // hash code. 
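+		// ("Deterministic" here means order-independent: when the "set" tag is
+		// present, element hashes are folded in with hashUpdateUnordered (XOR),
+		// so reordering the slice does not change the result; otherwise
+		// hashUpdateOrdered is used and element order matters.)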
+ var h uint64 + var set bool + if opts != nil { + set = (opts.Flags & visitFlagSet) != 0 + } + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + if set { + h = hashUpdateUnordered(h, current) + } else { + h = hashUpdateOrdered(w.h, h, current) + } + } + + return h, nil + + case reflect.String: + // Directly hash + w.h.Reset() + _, err := w.h.Write([]byte(v.String())) + return w.h.Sum64(), err + + default: + return 0, fmt.Errorf("unknown kind to hash: %s", k) + } + + return 0, nil +} + +func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { + // For ordered updates, use a real hash function + h.Reset() + + // We just panic if the binary writes fail because we are writing + // an int64 which should never be fail-able. + e1 := binary.Write(h, binary.LittleEndian, a) + e2 := binary.Write(h, binary.LittleEndian, b) + if e1 != nil { + panic(e1) + } + if e2 != nil { + panic(e2) + } + + return h.Sum64() +} + +func hashUpdateUnordered(a, b uint64) uint64 { + return a ^ b +} + +// visitFlag is used as a bitmask for affecting visit behavior +type visitFlag uint + +const ( + visitFlagInvalid visitFlag = iota + visitFlagSet = iota << 1 +) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go new file mode 100644 index 0000000..b6289c0 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -0,0 +1,15 @@ +package hashstructure + +// Includable is an interface that can optionally be implemented by +// a struct. It will be called for each field in the struct to check whether +// it should be included in the hash. +type Includable interface { + HashInclude(field string, v interface{}) (bool, error) +} + +// IncludableMap is an interface that can optionally be implemented by +// a struct. It will be called when a map-type field is found to ask the +// struct if the map item should be included in the hash. +type IncludableMap interface { + HashIncludeMap(field string, k, v interface{}) (bool, error) +} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 0000000..4f2ee4d --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 0000000..f9c841a --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 0000000..ac82cd2 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod new file mode 100644 index 0000000..52bb7c4 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 0000000..6a7f176 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 0000000..70760cf --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 0000000..d7ab7b6 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,401 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. 
+type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. 
+ pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. + originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + if err := walk(kv, w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + 
return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 0000000..a1338d6 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml new file mode 100644 index 0000000..362bdd4 --- /dev/null +++ b/vendor/github.com/oklog/run/.travis.yml @@ -0,0 +1,12 @@ +language: go +sudo: false +go: + - 1.x + - tip +install: + - go get -v github.com/golang/lint/golint + - go build ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 0000000..a7228cd --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,73 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. 
Actors are defined as +a pair of functions: an **execute** function, which should run synchronously; +and an **interrupt** function, which, when invoked, should cause the execute +function to return. Finally, invoke Run, which blocks until the first actor +returns. This general-purpose API allows callers to model pretty much any +runnable task, and achieve well-defined lifecycle semantics for the group. + +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). +But it's useful in any circumstance where you need to orchestrate multiple +goroutines as a unit whole. +[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a +video of a talk where run.Group is described. + +## Examples + +### context.Context + +```go +ctx, cancel := context.WithCancel(context.Background()) +g.Add(func() error { + return myProcess(ctx, ...) +}, func(error) { + cancel() +}) +``` + +### net.Listener + +```go +ln, _ := net.Listen("tcp", ":8080") +g.Add(func() error { + return http.Serve(ln, nil) +}, func(error) { + ln.Close() +}) +``` + +### io.ReadCloser + +```go +var conn io.ReadCloser = ... +g.Add(func() error { + s := bufio.NewScanner(conn) + for s.Scan() { + println(s.Text()) + } + return s.Err() +}, func(error) { + conn.Close() +}) +``` + +## Comparisons + +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +except it doesn't require actor goroutines to understand context semantics. + +It's somewhat similar to package +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), +except it has a much smaller API surface, delegating e.g. staged shutdown of +goroutines to the caller. diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 0000000..832d47d --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. 
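+	// The errors channel created just below is buffered to len(g.actors), so
+	// every actor goroutine can send its result and exit without waiting for
+	// a receiver; Run then drains one value per actor before returning.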
+ errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/ryanuber/go-glob/.travis.yml b/vendor/github.com/ryanuber/go-glob/.travis.yml new file mode 100644 index 0000000..9d1ca3c --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - tip +script: + - go test -v ./... diff --git a/vendor/github.com/ryanuber/go-glob/LICENSE b/vendor/github.com/ryanuber/go-glob/LICENSE new file mode 100644 index 0000000..bdfbd95 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Ryan Uber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ryanuber/go-glob/README.md b/vendor/github.com/ryanuber/go-glob/README.md new file mode 100644 index 0000000..48f7fcb --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/README.md @@ -0,0 +1,29 @@ +# String globbing in golang [![Build Status](https://travis-ci.org/ryanuber/go-glob.svg)](https://travis-ci.org/ryanuber/go-glob) + +`go-glob` is a single-function library implementing basic string glob support. + +Globs are an extremely user-friendly way of supporting string matching without +requiring knowledge of regular expressions or Go's particular regex engine. Most +people understand that if you put a `*` character somewhere in a string, it is +treated as a wildcard. Surprisingly, this functionality isn't found in Go's +standard library, except for `path.Match`, which is intended to be used while +comparing paths (not arbitrary strings), and contains specialized logic for this +use case. A better solution might be a POSIX basic (non-ERE) regular expression +engine for Go, which doesn't exist currently. 
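+
+One practical difference from `path.Match` is that the `*` used here is not
+stopped by path separators. A sketch (hypothetical `main.go`) showing the
+contrast:
+
+```
+package main
+
+import (
+	"fmt"
+	"path"
+
+	"github.com/ryanuber/go-glob"
+)
+
+func main() {
+	subj := "/home/ryanuber/.bashrc"
+	ok, _ := path.Match("/home/*", subj)        // false: * stops at "/"
+	fmt.Println(ok, glob.Glob("/home/*", subj)) // false true
+}
+```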
+ +Example +======= + +``` +package main + +import "github.com/ryanuber/go-glob" + +func main() { + glob.Glob("*World!", "Hello, World!") // true + glob.Glob("Hello,*", "Hello, World!") // true + glob.Glob("*ello,*", "Hello, World!") // true + glob.Glob("World!", "Hello, World!") // false + glob.Glob("/home/*", "/home/ryanuber/.bashrc") // true +} +``` diff --git a/vendor/github.com/ryanuber/go-glob/glob.go b/vendor/github.com/ryanuber/go-glob/glob.go new file mode 100644 index 0000000..e67db3b --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/glob.go @@ -0,0 +1,56 @@ +package glob + +import "strings" + +// The character which is treated like a glob +const GLOB = "*" + +// Glob will test a string pattern, potentially containing globs, against a +// subject string. The result is a simple true/false, determining whether or +// not the glob pattern matched the subject text. +func Glob(pattern, subj string) bool { + // Empty pattern can only match empty subject + if pattern == "" { + return subj == pattern + } + + // If the pattern _is_ a glob, it matches everything + if pattern == GLOB { + return true + } + + parts := strings.Split(pattern, GLOB) + + if len(parts) == 1 { + // No globs in pattern, so test for equality + return subj == pattern + } + + leadingGlob := strings.HasPrefix(pattern, GLOB) + trailingGlob := strings.HasSuffix(pattern, GLOB) + end := len(parts) - 1 + + // Go over the leading parts and ensure they match. + for i := 0; i < end; i++ { + idx := strings.Index(subj, parts[i]) + + switch i { + case 0: + // Check the first section. Requires special handling. + if !leadingGlob && idx != 0 { + return false + } + default: + // Check that the middle parts match. + if idx < 0 { + return false + } + } + + // Trim evaluated text from subj as we loop over the pattern. + subj = subj[idx+len(parts[i]):] + } + + // Reached the last section. Requires special handling. + return trailingGlob || strings.HasSuffix(subj, parts[end]) +} diff --git a/vendor/github.com/shirou/gopsutil/LICENSE b/vendor/github.com/shirou/gopsutil/LICENSE new file mode 100644 index 0000000..da71a5e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/LICENSE @@ -0,0 +1,61 @@ +gopsutil is distributed under BSD license reproduced below. + +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------- +internal/common/binary.go in the gopsutil is copied and modifid from golang/encoding/binary.go. + + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/cpu/cpu.go new file mode 100644 index 0000000..ceaf77f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu.go @@ -0,0 +1,189 @@ +package cpu + +import ( + "context" + "encoding/json" + "fmt" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/shirou/gopsutil/internal/common" +) + +// TimesStat contains the amounts of time the CPU has spent performing different +// kinds of work. Time units are in USER_HZ or Jiffies (typically hundredths of +// a second). It is based on linux /proc/stat file. 
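+//
+// A minimal usage sketch (assumes fmt and this package, imported as cpu):
+//
+//	ts, err := cpu.Times(false) // one aggregate "cpu-total" entry
+//	if err == nil && len(ts) > 0 {
+//		fmt.Printf("user=%.1f system=%.1f idle=%.1f total=%.1f\n",
+//			ts[0].User, ts[0].System, ts[0].Idle, ts[0].Total())
+//	}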
+type TimesStat struct { + CPU string `json:"cpu"` + User float64 `json:"user"` + System float64 `json:"system"` + Idle float64 `json:"idle"` + Nice float64 `json:"nice"` + Iowait float64 `json:"iowait"` + Irq float64 `json:"irq"` + Softirq float64 `json:"softirq"` + Steal float64 `json:"steal"` + Guest float64 `json:"guest"` + GuestNice float64 `json:"guestNice"` + Stolen float64 `json:"stolen"` +} + +type InfoStat struct { + CPU int32 `json:"cpu"` + VendorID string `json:"vendorId"` + Family string `json:"family"` + Model string `json:"model"` + Stepping int32 `json:"stepping"` + PhysicalID string `json:"physicalId"` + CoreID string `json:"coreId"` + Cores int32 `json:"cores"` + ModelName string `json:"modelName"` + Mhz float64 `json:"mhz"` + CacheSize int32 `json:"cacheSize"` + Flags []string `json:"flags"` + Microcode string `json:"microcode"` +} + +type lastPercent struct { + sync.Mutex + lastCPUTimes []TimesStat + lastPerCPUTimes []TimesStat +} + +var lastCPUPercent lastPercent +var invoke common.Invoker = common.Invoke{} + +func init() { + lastCPUPercent.Lock() + lastCPUPercent.lastCPUTimes, _ = Times(false) + lastCPUPercent.lastPerCPUTimes, _ = Times(true) + lastCPUPercent.Unlock() +} + +func Counts(logical bool) (int, error) { + return CountsWithContext(context.Background(), logical) +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} + +func (c TimesStat) String() string { + v := []string{ + `"cpu":"` + c.CPU + `"`, + `"user":` + strconv.FormatFloat(c.User, 'f', 1, 64), + `"system":` + strconv.FormatFloat(c.System, 'f', 1, 64), + `"idle":` + strconv.FormatFloat(c.Idle, 'f', 1, 64), + `"nice":` + strconv.FormatFloat(c.Nice, 'f', 1, 64), + `"iowait":` + strconv.FormatFloat(c.Iowait, 'f', 1, 64), + `"irq":` + strconv.FormatFloat(c.Irq, 'f', 1, 64), + `"softirq":` + strconv.FormatFloat(c.Softirq, 'f', 1, 64), + `"steal":` + strconv.FormatFloat(c.Steal, 'f', 1, 64), + `"guest":` + strconv.FormatFloat(c.Guest, 'f', 1, 64), + `"guestNice":` + strconv.FormatFloat(c.GuestNice, 'f', 1, 64), + `"stolen":` + strconv.FormatFloat(c.Stolen, 'f', 1, 64), + } + + return `{` + strings.Join(v, ",") + `}` +} + +// Total returns the total number of seconds in a CPUTimesStat +func (c TimesStat) Total() float64 { + total := c.User + c.System + c.Nice + c.Iowait + c.Irq + c.Softirq + c.Steal + + c.Guest + c.GuestNice + c.Idle + c.Stolen + return total +} + +func (c InfoStat) String() string { + s, _ := json.Marshal(c) + return string(s) +} + +func getAllBusy(t TimesStat) (float64, float64) { + busy := t.User + t.System + t.Nice + t.Iowait + t.Irq + + t.Softirq + t.Steal + t.Guest + t.GuestNice + t.Stolen + return busy + t.Idle, busy +} + +func calculateBusy(t1, t2 TimesStat) float64 { + t1All, t1Busy := getAllBusy(t1) + t2All, t2Busy := getAllBusy(t2) + + if t2Busy <= t1Busy { + return 0 + } + if t2All <= t1All { + return 1 + } + return (t2Busy - t1Busy) / (t2All - t1All) * 100 +} + +func calculateAllBusy(t1, t2 []TimesStat) ([]float64, error) { + // Make sure the CPU measurements have the same length. + if len(t1) != len(t2) { + return nil, fmt.Errorf( + "received two CPU counts: %d != %d", + len(t1), len(t2), + ) + } + + ret := make([]float64, len(t1)) + for i, t := range t2 { + ret[i] = calculateBusy(t1[i], t) + } + return ret, nil +} + +// Percent calculates the percentage of cpu used either per CPU or combined. +// If an interval of 0 is given it will compare the current cpu times against the last call. 
+// Returns one value per cpu, or a single value if percpu is set to false. +func Percent(interval time.Duration, percpu bool) ([]float64, error) { + return PercentWithContext(context.Background(), interval, percpu) +} + +func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool) ([]float64, error) { + if interval <= 0 { + return percentUsedFromLastCall(percpu) + } + + // Get CPU usage at the start of the interval. + cpuTimes1, err := Times(percpu) + if err != nil { + return nil, err + } + + time.Sleep(interval) + + // And at the end of the interval. + cpuTimes2, err := Times(percpu) + if err != nil { + return nil, err + } + + return calculateAllBusy(cpuTimes1, cpuTimes2) +} + +func percentUsedFromLastCall(percpu bool) ([]float64, error) { + cpuTimes, err := Times(percpu) + if err != nil { + return nil, err + } + lastCPUPercent.Lock() + defer lastCPUPercent.Unlock() + var lastTimes []TimesStat + if percpu { + lastTimes = lastCPUPercent.lastPerCPUTimes + lastCPUPercent.lastPerCPUTimes = cpuTimes + } else { + lastTimes = lastCPUPercent.lastCPUTimes + lastCPUPercent.lastCPUTimes = cpuTimes + } + + if lastTimes == nil { + return nil, fmt.Errorf("error getting times for cpu percent. lastTimes was nil") + } + return calculateAllBusy(lastTimes, cpuTimes) +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin.go new file mode 100644 index 0000000..74d2737 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin.go @@ -0,0 +1,115 @@ +// +build darwin + +package cpu + +import ( + "context" + "os/exec" + "strconv" + "strings" +) + +// sys/resource.h +const ( + CPUser = 0 + CPNice = 1 + CPSys = 2 + CPIntr = 3 + CPIdle = 4 + CPUStates = 5 +) + +// default value. from time.h +var ClocksPerSec = float64(128) + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + return perCPUTimes() + } + + return allCPUTimes() +} + +// Returns only one CPUInfoStat on FreeBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + sysctl, err := exec.LookPath("/usr/sbin/sysctl") + if err != nil { + return ret, err + } + out, err := invoke.CommandWithContext(ctx, sysctl, "machdep.cpu") + if err != nil { + return ret, err + } + + c := InfoStat{} + for _, line := range strings.Split(string(out), "\n") { + values := strings.Fields(line) + if len(values) < 1 { + continue + } + + t, err := strconv.ParseInt(values[1], 10, 64) + // err is not checked here because some value is string. 
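+		// The parsed value t and its error are only consulted below for the
+		// keys expected to be numeric (stepping, core_count, cache.size);
+		// the string-valued keys ignore them.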
+ if strings.HasPrefix(line, "machdep.cpu.brand_string") { + c.ModelName = strings.Join(values[1:], " ") + } else if strings.HasPrefix(line, "machdep.cpu.family") { + c.Family = values[1] + } else if strings.HasPrefix(line, "machdep.cpu.model") { + c.Model = values[1] + } else if strings.HasPrefix(line, "machdep.cpu.stepping") { + if err != nil { + return ret, err + } + c.Stepping = int32(t) + } else if strings.HasPrefix(line, "machdep.cpu.features") { + for _, v := range values[1:] { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if strings.HasPrefix(line, "machdep.cpu.leaf7_features") { + for _, v := range values[1:] { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if strings.HasPrefix(line, "machdep.cpu.extfeatures") { + for _, v := range values[1:] { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if strings.HasPrefix(line, "machdep.cpu.core_count") { + if err != nil { + return ret, err + } + c.Cores = int32(t) + } else if strings.HasPrefix(line, "machdep.cpu.cache.size") { + if err != nil { + return ret, err + } + c.CacheSize = int32(t) + } else if strings.HasPrefix(line, "machdep.cpu.vendor") { + c.VendorID = values[1] + } + } + + // Use the rated frequency of the CPU. This is a static value and does not + // account for low power or Turbo Boost modes. + out, err = invoke.CommandWithContext(ctx, sysctl, "hw.cpufrequency") + if err != nil { + return ret, err + } + + values := strings.Fields(string(out)) + hz, err := strconv.ParseFloat(values[1], 64) + if err != nil { + return ret, err + } + c.Mhz = hz / 1000000.0 + + return append(ret, c), nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_cgo.go new file mode 100644 index 0000000..180e0af --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_cgo.go @@ -0,0 +1,111 @@ +// +build darwin +// +build cgo + +package cpu + +/* +#include +#include +#include +#include +#include +#include +#if TARGET_OS_MAC +#include +#endif +#include +#include +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "unsafe" +) + +// these CPU times for darwin is borrowed from influxdb/telegraf. 
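+// Both helpers below go through Mach calls via cgo: perCPUTimes uses
+// host_processor_info to read one processor_cpu_load_info entry per CPU, and
+// allCPUTimes uses host_statistics with HOST_CPU_LOAD_INFO for the aggregate;
+// tick counts are converted to seconds by dividing by ClocksPerSec.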
+ +func perCPUTimes() ([]TimesStat, error) { + var ( + count C.mach_msg_type_number_t + cpuload *C.processor_cpu_load_info_data_t + ncpu C.natural_t + ) + + status := C.host_processor_info(C.host_t(C.mach_host_self()), + C.PROCESSOR_CPU_LOAD_INFO, + &ncpu, + (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size] + + bbuf := bytes.NewBuffer(buf) + + var ret []TimesStat + + for i := 0; i < int(ncpu); i++ { + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return nil, err + } + + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, + System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, + Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, + Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, + } + + ret = append(ret, c) + } + + return ret, nil +} + +func allCPUTimes() ([]TimesStat, error) { + var count C.mach_msg_type_number_t + var cpuload C.host_cpu_load_info_data_t + + count = C.HOST_CPU_LOAD_INFO_COUNT + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + c := TimesStat{ + CPU: "cpu-total", + User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, + System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, + Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, + Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, + } + + return []TimesStat{c}, nil + +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_nocgo.go new file mode 100644 index 0000000..242b4a8 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_darwin_nocgo.go @@ -0,0 +1,14 @@ +// +build darwin +// +build !cgo + +package cpu + +import "github.com/shirou/gopsutil/internal/common" + +func perCPUTimes() ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} + +func allCPUTimes() ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_fallback.go new file mode 100644 index 0000000..e9e7ada --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_fallback.go @@ -0,0 +1,25 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows + +package cpu + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + return []TimesStat{}, 
common.ErrNotImplementedError +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + return []InfoStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd.go new file mode 100644 index 0000000..b6c7186 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd.go @@ -0,0 +1,168 @@ +package cpu + +import ( + "context" + "fmt" + "os/exec" + "reflect" + "regexp" + "strconv" + "strings" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +var ClocksPerSec = float64(128) +var cpuMatch = regexp.MustCompile(`^CPU:`) +var originMatch = regexp.MustCompile(`Origin\s*=\s*"(.+)"\s+Id\s*=\s*(.+)\s+Family\s*=\s*(.+)\s+Model\s*=\s*(.+)\s+Stepping\s*=\s*(.+)`) +var featuresMatch = regexp.MustCompile(`Features=.+<(.+)>`) +var featuresMatch2 = regexp.MustCompile(`Features2=[a-f\dx]+<(.+)>`) +var cpuEnd = regexp.MustCompile(`^Trying to mount root`) +var cpuCores = regexp.MustCompile(`FreeBSD/SMP: (\d*) package\(s\) x (\d*) core\(s\)`) +var cpuTimesSize int +var emptyTimes cpuTimes + +func init() { + getconf, err := exec.LookPath("/usr/bin/getconf") + if err != nil { + return + } + out, err := invoke.Command(getconf, "CLK_TCK") + // ignore errors + if err == nil { + i, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) + if err == nil { + ClocksPerSec = float64(i) + } + } +} + +func timeStat(name string, t *cpuTimes) *TimesStat { + return &TimesStat{ + User: float64(t.User) / ClocksPerSec, + Nice: float64(t.Nice) / ClocksPerSec, + System: float64(t.Sys) / ClocksPerSec, + Idle: float64(t.Idle) / ClocksPerSec, + Irq: float64(t.Intr) / ClocksPerSec, + CPU: name, + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + buf, err := unix.SysctlRaw("kern.cp_times") + if err != nil { + return nil, err + } + + // We can't do this in init due to the conflict with cpu.init() + if cpuTimesSize == 0 { + cpuTimesSize = int(reflect.TypeOf(cpuTimes{}).Size()) + } + + ncpus := len(buf) / cpuTimesSize + ret := make([]TimesStat, 0, ncpus) + for i := 0; i < ncpus; i++ { + times := (*cpuTimes)(unsafe.Pointer(&buf[i*cpuTimesSize])) + if *times == emptyTimes { + // CPU not present + continue + } + ret = append(ret, *timeStat(fmt.Sprintf("cpu%d", len(ret)), times)) + } + return ret, nil + } + + buf, err := unix.SysctlRaw("kern.cp_time") + if err != nil { + return nil, err + } + + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + return []TimesStat{*timeStat("cpu-total", times)}, nil +} + +// Returns only one InfoStat on FreeBSD. The information regarding core +// count, however is accurate and it is assumed that all InfoStat attributes +// are the same across CPUs. 
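+//
+// The data is assembled from /var/run/dmesg.boot plus the hw.clockrate,
+// hw.ncpu and hw.model sysctl values; the same InfoStat is then replicated
+// once per detected package.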
+func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + const dmesgBoot = "/var/run/dmesg.boot" + + c, num, err := parseDmesgBoot(dmesgBoot) + if err != nil { + return nil, err + } + + var u32 uint32 + if u32, err = unix.SysctlUint32("hw.clockrate"); err != nil { + return nil, err + } + c.Mhz = float64(u32) + + if u32, err = unix.SysctlUint32("hw.ncpu"); err != nil { + return nil, err + } + c.Cores = int32(u32) + + if c.ModelName, err = unix.Sysctl("hw.model"); err != nil { + return nil, err + } + + ret := make([]InfoStat, num) + for i := 0; i < num; i++ { + ret[i] = c + } + + return ret, nil +} + +func parseDmesgBoot(fileName string) (InfoStat, int, error) { + c := InfoStat{} + lines, _ := common.ReadLines(fileName) + cpuNum := 1 // default cpu num is 1 + for _, line := range lines { + if matches := cpuEnd.FindStringSubmatch(line); matches != nil { + break + } else if matches := originMatch.FindStringSubmatch(line); matches != nil { + c.VendorID = matches[1] + c.Family = matches[3] + c.Model = matches[4] + t, err := strconv.ParseInt(matches[5], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err) + } + c.Stepping = int32(t) + } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { + for _, v := range strings.Split(matches[1], ",") { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if matches := featuresMatch2.FindStringSubmatch(line); matches != nil { + for _, v := range strings.Split(matches[1], ",") { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if matches := cpuCores.FindStringSubmatch(line); matches != nil { + t, err := strconv.ParseInt(matches[1], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err) + } + cpuNum = int(t) + t2, err := strconv.ParseInt(matches[2], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err) + } + c.Cores = int32(t2) + } + } + + return c, cpuNum, nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_386.go new file mode 100644 index 0000000..8b7f4c3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_386.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_amd64.go new file mode 100644 index 0000000..57e1452 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_amd64.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_arm.go new file mode 100644 index 0000000..8b7f4c3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_freebsd_arm.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_linux.go new file mode 100644 index 0000000..2fffe85 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_linux.go @@ -0,0 +1,284 @@ +// +build 
linux + +package cpu + +import ( + "context" + "errors" + "fmt" + "os/exec" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +var CPUTick = float64(100) + +func init() { + getconf, err := exec.LookPath("/usr/bin/getconf") + if err != nil { + return + } + out, err := invoke.CommandWithContext(context.Background(), getconf, "CLK_TCK") + // ignore errors + if err == nil { + i, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) + if err == nil { + CPUTick = i + } + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + filename := common.HostProc("stat") + var lines = []string{} + if percpu { + statlines, err := common.ReadLines(filename) + if err != nil || len(statlines) < 2 { + return []TimesStat{}, nil + } + for _, line := range statlines[1:] { + if !strings.HasPrefix(line, "cpu") { + break + } + lines = append(lines, line) + } + } else { + lines, _ = common.ReadLinesOffsetN(filename, 0, 1) + } + + ret := make([]TimesStat, 0, len(lines)) + + for _, line := range lines { + ct, err := parseStatLine(line) + if err != nil { + continue + } + ret = append(ret, *ct) + + } + return ret, nil +} + +func sysCPUPath(cpu int32, relPath string) string { + return common.HostSys(fmt.Sprintf("devices/system/cpu/cpu%d", cpu), relPath) +} + +func finishCPUInfo(c *InfoStat) error { + var lines []string + var err error + var value float64 + + if len(c.CoreID) == 0 { + lines, err = common.ReadLines(sysCPUPath(c.CPU, "topology/core_id")) + if err == nil { + c.CoreID = lines[0] + } + } + + // override the value of c.Mhz with cpufreq/cpuinfo_max_freq regardless + // of the value from /proc/cpuinfo because we want to report the maximum + // clock-speed of the CPU for c.Mhz, matching the behaviour of Windows + lines, err = common.ReadLines(sysCPUPath(c.CPU, "cpufreq/cpuinfo_max_freq")) + // if we encounter errors below such as there are no cpuinfo_max_freq file, + // we just ignore. so let Mhz is 0. + if err != nil { + return nil + } + value, err = strconv.ParseFloat(lines[0], 64) + if err != nil { + return nil + } + c.Mhz = value / 1000.0 // value is in kHz + if c.Mhz > 9999 { + c.Mhz = c.Mhz / 1000.0 // value in Hz + } + return nil +} + +// CPUInfo on linux will return 1 item per physical thread. +// +// CPUs have three levels of counting: sockets, cores, threads. +// Cores with HyperThreading count as having 2 threads per core. +// Sockets often come with many physical CPU cores. +// For example a single socket board with two cores each with HT will +// return 4 CPUInfoStat structs on Linux and the "Cores" field set to 1. 
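+//
+// Entries are parsed from /proc/cpuinfo; the reported Mhz is then overridden,
+// where available, with cpufreq/cpuinfo_max_freq from sysfs so that the
+// maximum clock speed is reported (see finishCPUInfo).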
+func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + filename := common.HostProc("cpuinfo") + lines, _ := common.ReadLines(filename) + + var ret []InfoStat + var processorName string + + c := InfoStat{CPU: -1, Cores: 1} + for _, line := range lines { + fields := strings.Split(line, ":") + if len(fields) < 2 { + continue + } + key := strings.TrimSpace(fields[0]) + value := strings.TrimSpace(fields[1]) + + switch key { + case "Processor": + processorName = value + case "processor": + if c.CPU >= 0 { + err := finishCPUInfo(&c) + if err != nil { + return ret, err + } + ret = append(ret, c) + } + c = InfoStat{Cores: 1, ModelName: processorName} + t, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return ret, err + } + c.CPU = int32(t) + case "vendorId", "vendor_id": + c.VendorID = value + case "cpu family": + c.Family = value + case "model": + c.Model = value + case "model name", "cpu": + c.ModelName = value + if strings.Contains(value, "POWER8") || + strings.Contains(value, "POWER7") { + c.Model = strings.Split(value, " ")[0] + c.Family = "POWER" + c.VendorID = "IBM" + } + case "stepping", "revision": + val := value + + if key == "revision" { + val = strings.Split(value, ".")[0] + } + + t, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return ret, err + } + c.Stepping = int32(t) + case "cpu MHz", "clock": + // treat this as the fallback value, thus we ignore error + if t, err := strconv.ParseFloat(strings.Replace(value, "MHz", "", 1), 64); err == nil { + c.Mhz = t + } + case "cache size": + t, err := strconv.ParseInt(strings.Replace(value, " KB", "", 1), 10, 64) + if err != nil { + return ret, err + } + c.CacheSize = int32(t) + case "physical id": + c.PhysicalID = value + case "core id": + c.CoreID = value + case "flags", "Features": + c.Flags = strings.FieldsFunc(value, func(r rune) bool { + return r == ',' || r == ' ' + }) + case "microcode": + c.Microcode = value + } + } + if c.CPU >= 0 { + err := finishCPUInfo(&c) + if err != nil { + return ret, err + } + ret = append(ret, c) + } + return ret, nil +} + +func parseStatLine(line string) (*TimesStat, error) { + fields := strings.Fields(line) + + if len(fields) == 0 { + return nil, errors.New("stat does not contain cpu info") + } + + if strings.HasPrefix(fields[0], "cpu") == false { + return nil, errors.New("not contain cpu") + } + + cpu := fields[0] + if cpu == "cpu" { + cpu = "cpu-total" + } + user, err := strconv.ParseFloat(fields[1], 64) + if err != nil { + return nil, err + } + nice, err := strconv.ParseFloat(fields[2], 64) + if err != nil { + return nil, err + } + system, err := strconv.ParseFloat(fields[3], 64) + if err != nil { + return nil, err + } + idle, err := strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, err + } + iowait, err := strconv.ParseFloat(fields[5], 64) + if err != nil { + return nil, err + } + irq, err := strconv.ParseFloat(fields[6], 64) + if err != nil { + return nil, err + } + softirq, err := strconv.ParseFloat(fields[7], 64) + if err != nil { + return nil, err + } + + ct := &TimesStat{ + CPU: cpu, + User: user / CPUTick, + Nice: nice / CPUTick, + System: system / CPUTick, + Idle: idle / CPUTick, + Iowait: iowait / CPUTick, + Irq: irq / CPUTick, + Softirq: softirq / CPUTick, + } + if len(fields) > 8 { // Linux >= 2.6.11 + steal, err := strconv.ParseFloat(fields[8], 64) + if err != nil { + return nil, err + } + ct.Steal = steal / CPUTick + } + if len(fields) > 9 { // Linux >= 
2.6.24 + guest, err := strconv.ParseFloat(fields[9], 64) + if err != nil { + return nil, err + } + ct.Guest = guest / CPUTick + } + if len(fields) > 10 { // Linux >= 3.2.0 + guestNice, err := strconv.ParseFloat(fields[10], 64) + if err != nil { + return nil, err + } + ct.GuestNice = guestNice / CPUTick + } + + return ct, nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_openbsd.go new file mode 100644 index 0000000..0177626 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_openbsd.go @@ -0,0 +1,147 @@ +// +build openbsd + +package cpu + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "os/exec" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +// sys/sched.h +var ( + CPUser = 0 + CPNice = 1 + CPSys = 2 + CPIntr = 3 + CPIdle = 4 + CPUStates = 5 +) + +// sys/sysctl.h +const ( + CTLKern = 1 // "high kernel": proc, limits + KernCptime = 40 // KERN_CPTIME + KernCptime2 = 71 // KERN_CPTIME2 +) + +var ClocksPerSec = float64(128) + +func init() { + func() { + getconf, err := exec.LookPath("/usr/bin/getconf") + if err != nil { + return + } + out, err := invoke.Command(getconf, "CLK_TCK") + // ignore errors + if err == nil { + i, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) + if err == nil { + ClocksPerSec = float64(i) + } + } + }() + func() { + v, err := unix.Sysctl("kern.osrelease") // can't reuse host.PlatformInformation because of circular import + if err != nil { + return + } + v = strings.ToLower(v) + version, err := strconv.ParseFloat(v, 64) + if err != nil { + return + } + if version >= 6.4 { + CPIntr = 4 + CPIdle = 5 + CPUStates = 6 + } + }() +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat + + var ncpu int + if percpu { + ncpu, _ = Counts(true) + } else { + ncpu = 1 + } + + for i := 0; i < ncpu; i++ { + var cpuTimes = make([]int64, CPUStates) + var mib []int32 + if percpu { + mib = []int32{CTLKern, KernCptime} + } else { + mib = []int32{CTLKern, KernCptime2, int32(i)} + } + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + + br := bytes.NewReader(buf) + err = binary.Read(br, binary.LittleEndian, &cpuTimes) + if err != nil { + return ret, err + } + c := TimesStat{ + User: float64(cpuTimes[CPUser]) / ClocksPerSec, + Nice: float64(cpuTimes[CPNice]) / ClocksPerSec, + System: float64(cpuTimes[CPSys]) / ClocksPerSec, + Idle: float64(cpuTimes[CPIdle]) / ClocksPerSec, + Irq: float64(cpuTimes[CPIntr]) / ClocksPerSec, + } + if !percpu { + c.CPU = "cpu-total" + } else { + c.CPU = fmt.Sprintf("cpu%d", i) + } + ret = append(ret, c) + } + + return ret, nil +} + +// Returns only one (minimal) CPUInfoStat on OpenBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + var err error + + c := InfoStat{} + + var u32 uint32 + if u32, err = unix.SysctlUint32("hw.cpuspeed"); err != nil { + return nil, err + } + c.Mhz = float64(u32) + + if u32, err = unix.SysctlUint32("hw.ncpuonline"); err != nil { + return nil, err + } + c.Cores = int32(u32) + + if c.ModelName, err = unix.Sysctl("hw.model"); err != nil { + return nil, err + } + + return append(ret, c), nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_solaris.go 
b/vendor/github.com/shirou/gopsutil/cpu/cpu_solaris.go new file mode 100644 index 0000000..117fd90 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_solaris.go @@ -0,0 +1,198 @@ +package cpu + +import ( + "context" + "errors" + "fmt" + "os/exec" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +var ClocksPerSec = float64(128) + +func init() { + getconf, err := exec.LookPath("/usr/bin/getconf") + if err != nil { + return + } + out, err := invoke.Command(getconf, "CLK_TCK") + // ignore errors + if err == nil { + i, err := strconv.ParseFloat(strings.TrimSpace(string(out)), 64) + if err == nil { + ClocksPerSec = float64(i) + } + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + psrInfo, err := exec.LookPath("/usr/sbin/psrinfo") + if err != nil { + return nil, fmt.Errorf("cannot find psrinfo: %s", err) + } + psrInfoOut, err := invoke.CommandWithContext(ctx, psrInfo, "-p", "-v") + if err != nil { + return nil, fmt.Errorf("cannot execute psrinfo: %s", err) + } + + isaInfo, err := exec.LookPath("/usr/bin/isainfo") + if err != nil { + return nil, fmt.Errorf("cannot find isainfo: %s", err) + } + isaInfoOut, err := invoke.CommandWithContext(ctx, isaInfo, "-b", "-v") + if err != nil { + return nil, fmt.Errorf("cannot execute isainfo: %s", err) + } + + procs, err := parseProcessorInfo(string(psrInfoOut)) + if err != nil { + return nil, fmt.Errorf("error parsing psrinfo output: %s", err) + } + + flags, err := parseISAInfo(string(isaInfoOut)) + if err != nil { + return nil, fmt.Errorf("error parsing isainfo output: %s", err) + } + + result := make([]InfoStat, 0, len(flags)) + for _, proc := range procs { + procWithFlags := proc + procWithFlags.Flags = flags + result = append(result, procWithFlags) + } + + return result, nil +} + +var flagsMatch = regexp.MustCompile(`[\w\.]+`) + +func parseISAInfo(cmdOutput string) ([]string, error) { + words := flagsMatch.FindAllString(cmdOutput, -1) + + // Sanity check the output + if len(words) < 4 || words[1] != "bit" || words[3] != "applications" { + return nil, errors.New("attempted to parse invalid isainfo output") + } + + flags := make([]string, len(words)-4) + for i, val := range words[4:] { + flags[i] = val + } + sort.Strings(flags) + + return flags, nil +} + +var psrInfoMatch = regexp.MustCompile(`The physical processor has (?:([\d]+) virtual processor \(([\d]+)\)|([\d]+) cores and ([\d]+) virtual processors[^\n]+)\n(?:\s+ The core has.+\n)*\s+.+ \((\w+) ([\S]+) family (.+) model (.+) step (.+) clock (.+) MHz\)\n[\s]*(.*)`) + +const ( + psrNumCoresOffset = 1 + psrNumCoresHTOffset = 3 + psrNumHTOffset = 4 + psrVendorIDOffset = 5 + psrFamilyOffset = 7 + psrModelOffset = 8 + psrStepOffset = 9 + psrClockOffset = 10 + psrModelNameOffset = 11 +) + +func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { + matches := psrInfoMatch.FindAllStringSubmatch(cmdOutput, -1) + + var infoStatCount int32 + result := make([]InfoStat, 0, len(matches)) + for physicalIndex, physicalCPU := range matches { + var step int32 + var clock float64 + + if physicalCPU[psrStepOffset] != "" { + stepParsed, err := strconv.ParseInt(physicalCPU[psrStepOffset], 10, 32) + if err 
!= nil { + return nil, fmt.Errorf("cannot parse value %q for step as 32-bit integer: %s", physicalCPU[9], err) + } + step = int32(stepParsed) + } + + if physicalCPU[psrClockOffset] != "" { + clockParsed, err := strconv.ParseInt(physicalCPU[psrClockOffset], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for clock as 32-bit integer: %s", physicalCPU[10], err) + } + clock = float64(clockParsed) + } + + var err error + var numCores int64 + var numHT int64 + switch { + case physicalCPU[psrNumCoresOffset] != "": + numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[1], err) + } + + for i := 0; i < int(numCores); i++ { + result = append(result, InfoStat{ + CPU: infoStatCount, + PhysicalID: strconv.Itoa(physicalIndex), + CoreID: strconv.Itoa(i), + Cores: 1, + VendorID: physicalCPU[psrVendorIDOffset], + ModelName: physicalCPU[psrModelNameOffset], + Family: physicalCPU[psrFamilyOffset], + Model: physicalCPU[psrModelOffset], + Stepping: step, + Mhz: clock, + }) + infoStatCount++ + } + case physicalCPU[psrNumCoresHTOffset] != "": + numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresHTOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[3], err) + } + + numHT, err = strconv.ParseInt(physicalCPU[psrNumHTOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for hyperthread count as 32-bit integer: %s", physicalCPU[4], err) + } + + for i := 0; i < int(numCores); i++ { + result = append(result, InfoStat{ + CPU: infoStatCount, + PhysicalID: strconv.Itoa(physicalIndex), + CoreID: strconv.Itoa(i), + Cores: int32(numHT) / int32(numCores), + VendorID: physicalCPU[psrVendorIDOffset], + ModelName: physicalCPU[psrModelNameOffset], + Family: physicalCPU[psrFamilyOffset], + Model: physicalCPU[psrModelOffset], + Stepping: step, + Mhz: clock, + }) + infoStatCount++ + } + default: + return nil, errors.New("values for cores with and without hyperthreading are both set") + } + } + return result, nil +} diff --git a/vendor/github.com/shirou/gopsutil/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/cpu/cpu_windows.go new file mode 100644 index 0000000..3959212 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/cpu/cpu_windows.go @@ -0,0 +1,171 @@ +// +build windows + +package cpu + +import ( + "context" + "fmt" + "unsafe" + + "github.com/StackExchange/wmi" + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" +) + +type Win32_Processor struct { + LoadPercentage *uint16 + Family uint16 + Manufacturer string + Name string + NumberOfLogicalProcessors uint32 + ProcessorID *string + Stepping *string + MaxClockSpeed uint32 +} + +type win32_PerfRawData_Counters_ProcessorInformation struct { + Name string + PercentDPCTime uint64 + PercentIdleTime uint64 + PercentUserTime uint64 + PercentProcessorTime uint64 + PercentInterruptTime uint64 + PercentPriorityTime uint64 + PercentPrivilegedTime uint64 + InterruptsPerSec uint32 + ProcessorFrequency uint32 + DPCRate uint32 +} + +// Win32_PerfFormattedData_PerfOS_System struct to have count of processes and processor queue length +type Win32_PerfFormattedData_PerfOS_System struct { + Processes uint32 + ProcessorQueueLength uint32 +} + +const ( + win32_TicksPerSecond = 10000000.0 +) + +// Times returns times stat per cpu and combined for all CPUs +func Times(percpu bool) ([]TimesStat, 
error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + return perCPUTimesWithContext(ctx) + } + + var ret []TimesStat + var lpIdleTime common.FILETIME + var lpKernelTime common.FILETIME + var lpUserTime common.FILETIME + r, _, _ := common.ProcGetSystemTimes.Call( + uintptr(unsafe.Pointer(&lpIdleTime)), + uintptr(unsafe.Pointer(&lpKernelTime)), + uintptr(unsafe.Pointer(&lpUserTime))) + if r == 0 { + return ret, windows.GetLastError() + } + + LOT := float64(0.0000001) + HIT := (LOT * 4294967296.0) + idle := ((HIT * float64(lpIdleTime.DwHighDateTime)) + (LOT * float64(lpIdleTime.DwLowDateTime))) + user := ((HIT * float64(lpUserTime.DwHighDateTime)) + (LOT * float64(lpUserTime.DwLowDateTime))) + kernel := ((HIT * float64(lpKernelTime.DwHighDateTime)) + (LOT * float64(lpKernelTime.DwLowDateTime))) + system := (kernel - idle) + + ret = append(ret, TimesStat{ + CPU: "cpu-total", + Idle: float64(idle), + User: float64(user), + System: float64(system), + }) + return ret, nil +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + var dst []Win32_Processor + q := wmi.CreateQuery(&dst, "") + if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil { + return ret, err + } + + var procID string + for i, l := range dst { + procID = "" + if l.ProcessorID != nil { + procID = *l.ProcessorID + } + + cpu := InfoStat{ + CPU: int32(i), + Family: fmt.Sprintf("%d", l.Family), + VendorID: l.Manufacturer, + ModelName: l.Name, + Cores: int32(l.NumberOfLogicalProcessors), + PhysicalID: procID, + Mhz: float64(l.MaxClockSpeed), + Flags: []string{}, + } + ret = append(ret, cpu) + } + + return ret, nil +} + +// PerfInfo returns the performance counter's instance value for ProcessorInformation. +// Name property is the key by which overall, per cpu and per core metric is known. +func perfInfoWithContext(ctx context.Context) ([]win32_PerfRawData_Counters_ProcessorInformation, error) { + var ret []win32_PerfRawData_Counters_ProcessorInformation + + q := wmi.CreateQuery(&ret, "WHERE NOT Name LIKE '%_Total'") + err := common.WMIQueryWithContext(ctx, q, &ret) + if err != nil { + return []win32_PerfRawData_Counters_ProcessorInformation{}, err + } + + return ret, err +} + +// ProcInfo returns processes count and processor queue length in the system. +// There is a single queue for processor even on multiprocessors systems. 
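+// Both values come from a WMI query against the
+// Win32_PerfFormattedData_PerfOS_System performance class.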
+func ProcInfo() ([]Win32_PerfFormattedData_PerfOS_System, error) { + return ProcInfoWithContext(context.Background()) +} + +func ProcInfoWithContext(ctx context.Context) ([]Win32_PerfFormattedData_PerfOS_System, error) { + var ret []Win32_PerfFormattedData_PerfOS_System + q := wmi.CreateQuery(&ret, "") + err := common.WMIQueryWithContext(ctx, q, &ret) + if err != nil { + return []Win32_PerfFormattedData_PerfOS_System{}, err + } + return ret, err +} + +// perCPUTimes returns times stat per cpu, per core and overall for all CPUs +func perCPUTimesWithContext(ctx context.Context) ([]TimesStat, error) { + var ret []TimesStat + stats, err := perfInfoWithContext(ctx) + if err != nil { + return nil, err + } + for _, v := range stats { + c := TimesStat{ + CPU: v.Name, + User: float64(v.PercentUserTime) / win32_TicksPerSecond, + System: float64(v.PercentPrivilegedTime) / win32_TicksPerSecond, + Idle: float64(v.PercentIdleTime) / win32_TicksPerSecond, + Irq: float64(v.PercentInterruptTime) / win32_TicksPerSecond, + } + ret = append(ret, c) + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk.go b/vendor/github.com/shirou/gopsutil/disk/disk.go new file mode 100644 index 0000000..38d8a8f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk.go @@ -0,0 +1,61 @@ +package disk + +import ( + "encoding/json" + + "github.com/shirou/gopsutil/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +type UsageStat struct { + Path string `json:"path"` + Fstype string `json:"fstype"` + Total uint64 `json:"total"` + Free uint64 `json:"free"` + Used uint64 `json:"used"` + UsedPercent float64 `json:"usedPercent"` + InodesTotal uint64 `json:"inodesTotal"` + InodesUsed uint64 `json:"inodesUsed"` + InodesFree uint64 `json:"inodesFree"` + InodesUsedPercent float64 `json:"inodesUsedPercent"` +} + +type PartitionStat struct { + Device string `json:"device"` + Mountpoint string `json:"mountpoint"` + Fstype string `json:"fstype"` + Opts string `json:"opts"` +} + +type IOCountersStat struct { + ReadCount uint64 `json:"readCount"` + MergedReadCount uint64 `json:"mergedReadCount"` + WriteCount uint64 `json:"writeCount"` + MergedWriteCount uint64 `json:"mergedWriteCount"` + ReadBytes uint64 `json:"readBytes"` + WriteBytes uint64 `json:"writeBytes"` + ReadTime uint64 `json:"readTime"` + WriteTime uint64 `json:"writeTime"` + IopsInProgress uint64 `json:"iopsInProgress"` + IoTime uint64 `json:"ioTime"` + WeightedIO uint64 `json:"weightedIO"` + Name string `json:"name"` + SerialNumber string `json:"serialNumber"` + Label string `json:"label"` +} + +func (d UsageStat) String() string { + s, _ := json.Marshal(d) + return string(s) +} + +func (d PartitionStat) String() string { + s, _ := json.Marshal(d) + return string(s) +} + +func (d IOCountersStat) String() string { + s, _ := json.Marshal(d) + return string(s) +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go new file mode 100644 index 0000000..2b1d000 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.go @@ -0,0 +1,118 @@ +// +build darwin + +package disk + +import ( + "context" + "path" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + + count, err := Getfsstat(nil, 
MntWait) + if err != nil { + return ret, err + } + fs := make([]Statfs, count) + if _, err = Getfsstat(fs, MntWait); err != nil { + return ret, err + } + for _, stat := range fs { + opts := "rw" + if stat.Flags&MntReadOnly != 0 { + opts = "ro" + } + if stat.Flags&MntSynchronous != 0 { + opts += ",sync" + } + if stat.Flags&MntNoExec != 0 { + opts += ",noexec" + } + if stat.Flags&MntNoSuid != 0 { + opts += ",nosuid" + } + if stat.Flags&MntUnion != 0 { + opts += ",union" + } + if stat.Flags&MntAsync != 0 { + opts += ",async" + } + if stat.Flags&MntSuidDir != 0 { + opts += ",suiddir" + } + if stat.Flags&MntSoftDep != 0 { + opts += ",softdep" + } + if stat.Flags&MntNoSymFollow != 0 { + opts += ",nosymfollow" + } + if stat.Flags&MntGEOMJournal != 0 { + opts += ",gjounalc" + } + if stat.Flags&MntMultilabel != 0 { + opts += ",multilabel" + } + if stat.Flags&MntACLs != 0 { + opts += ",acls" + } + if stat.Flags&MntNoATime != 0 { + opts += ",noattime" + } + if stat.Flags&MntClusterRead != 0 { + opts += ",nocluster" + } + if stat.Flags&MntClusterWrite != 0 { + opts += ",noclusterw" + } + if stat.Flags&MntNFS4ACLs != 0 { + opts += ",nfs4acls" + } + d := PartitionStat{ + Device: common.IntToString(stat.Mntfromname[:]), + Mountpoint: common.IntToString(stat.Mntonname[:]), + Fstype: common.IntToString(stat.Fstypename[:]), + Opts: opts, + } + if all == false { + if !path.IsAbs(d.Device) || !common.PathExists(d.Device) { + continue + } + } + + ret = append(ret, d) + } + + return ret, nil +} + +func Getfsstat(buf []Statfs, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf)) + } + r0, _, e1 := unix.Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func getFsType(stat unix.Statfs_t) string { + return common.IntToString(stat.Fstypename[:]) +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin.h b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.h new file mode 100644 index 0000000..d0fe514 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin.h @@ -0,0 +1,164 @@ +#include +#include +#include +#include +#include + +// The iterator of all things disk. Allocated by StartIOCounterFetch, released +// by EndIOCounterFetch. +static io_iterator_t diskIter; + +// Begins fetching IO counters. +// +// Returns 1 if the fetch started successfully, false otherwise. +// +// If the fetch was started successfully, you must call EndIOCounterFetch once +// done to release resources. +int StartIOCounterFetch() +{ + if (IOServiceGetMatchingServices(kIOMasterPortDefault, + IOServiceMatching(kIOMediaClass), + &diskIter) != kIOReturnSuccess) { + return 0; + } + + return 1; +} + +// Releases resources from fetching IO counters. +void EndIOCounterFetch() +{ + IOObjectRelease(diskIter); +} + +// The current disk entry of interest. Allocated by FetchNextDisk(), released by +// ReadDiskInfo(). +static io_registry_entry_t diskEntry; + +// The parent of diskEntry. Same lifetimes. +static io_registry_entry_t parentEntry; + +// Fetches the next disk. Note that a disk entry is allocated, and will be held +// until it is processed and freed by ReadDiskInfo. 
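+// Returns 1 when a disk is found (an IOMedia entry whose parent conforms to
+// IOBlockStorageDriver), or 0 once the iterator is exhausted.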
+int FetchNextDisk() +{ + while ((diskEntry = IOIteratorNext(diskIter)) != 0) { + // We are iterating IOMedia. We need to get the parent too (IOBSD). + if (IORegistryEntryGetParentEntry(diskEntry, kIOServicePlane, &parentEntry) != kIOReturnSuccess) { + // something is wrong... + IOObjectRelease(diskEntry); + continue; + } + + if (!IOObjectConformsTo(parentEntry, "IOBlockStorageDriver")) { + // no use to us, try the next disk + IOObjectRelease(diskEntry); + IOObjectRelease(parentEntry); + continue; + } + + // Got a disk OK. + return 1; + } + + // No more disks. + return 0; +} + +// Reads the current disk (from iteration) info into DiskInfo struct. +// Once done, all resources from the current iteration of reading are freed, +// ready for FetchNextDisk() to be called again. +int ReadDiskInfo(DiskInfo *info) +{ + // Parent props. Allocated by us. + CFDictionaryRef parentProps = NULL; + + // Disk props. Allocated by us. + CFDictionaryRef diskProps = NULL; + + // Disk stats, fetched by us, but not allocated by us. + CFDictionaryRef stats = NULL; + + if (IORegistryEntryCreateCFProperties(diskEntry, (CFMutableDictionaryRef *)&parentProps, + kCFAllocatorDefault, kNilOptions) != kIOReturnSuccess) + { + // can't get parent props, give up + CFRelease(parentProps); + IOObjectRelease(diskEntry); + IOObjectRelease(parentEntry); + return -1; + } + + if (IORegistryEntryCreateCFProperties(parentEntry, (CFMutableDictionaryRef *)&diskProps, + kCFAllocatorDefault, kNilOptions) != kIOReturnSuccess) + { + // can't get disk props, give up + CFRelease(parentProps); + CFRelease(diskProps); + IOObjectRelease(diskEntry); + IOObjectRelease(parentEntry); + return -1; + } + + // Start fetching + CFStringRef cfDiskName = (CFStringRef)CFDictionaryGetValue(parentProps, CFSTR(kIOBSDNameKey)); + CFStringGetCString(cfDiskName, info->DiskName, MAX_DISK_NAME, CFStringGetSystemEncoding()); + stats = (CFDictionaryRef)CFDictionaryGetValue( diskProps, CFSTR(kIOBlockStorageDriverStatisticsKey)); + + if (stats == NULL) { + // stat fetch failed... + CFRelease(parentProps); + CFRelease(diskProps); + IOObjectRelease(parentEntry); + IOObjectRelease(diskEntry); + return -1; + } + + CFNumberRef cfnum; + + if ((cfnum = (CFNumberRef)CFDictionaryGetValue(stats, CFSTR(kIOBlockStorageDriverStatisticsReadsKey)))) { + CFNumberGetValue(cfnum, kCFNumberSInt64Type, &info->Reads); + } else { + info->Reads = 0; + } + + if ((cfnum = (CFNumberRef)CFDictionaryGetValue(stats, CFSTR(kIOBlockStorageDriverStatisticsWritesKey)))) { + CFNumberGetValue(cfnum, kCFNumberSInt64Type, &info->Writes); + } else { + info->Writes = 0; + } + + if ((cfnum = (CFNumberRef)CFDictionaryGetValue(stats, CFSTR(kIOBlockStorageDriverStatisticsBytesReadKey)))) { + CFNumberGetValue(cfnum, kCFNumberSInt64Type, &info->ReadBytes); + } else { + info->ReadBytes = 0; + } + + if ((cfnum = (CFNumberRef)CFDictionaryGetValue(stats, CFSTR(kIOBlockStorageDriverStatisticsBytesWrittenKey)))) { + CFNumberGetValue(cfnum, kCFNumberSInt64Type, &info->WriteBytes); + } else { + info->WriteBytes = 0; + } + + if ((cfnum = (CFNumberRef)CFDictionaryGetValue(stats, CFSTR(kIOBlockStorageDriverStatisticsTotalReadTimeKey)))) { + CFNumberGetValue(cfnum, kCFNumberSInt64Type, &info->ReadTime); + } else { + info->ReadTime = 0; + } + if ((cfnum = (CFNumberRef)CFDictionaryGetValue(stats, CFSTR(kIOBlockStorageDriverStatisticsTotalWriteTimeKey)))) { + CFNumberGetValue(cfnum, kCFNumberSInt64Type, &info->WriteTime); + } else { + info->WriteTime = 0; + } + + // note: read/write time are in ns, but we want ms. 
+ info->ReadTime = info->ReadTime / 1000 / 1000; + info->WriteTime = info->WriteTime / 1000 / 1000; + + CFRelease(parentProps); + CFRelease(diskProps); + IOObjectRelease(parentEntry); + IOObjectRelease(diskEntry); + return 0; +} + diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_386.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_386.go new file mode 100644 index 0000000..bd83a4a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_386.go @@ -0,0 +1,59 @@ +// +build darwin +// +build 386 + +package disk + +const ( + MntWait = 1 + MfsNameLen = 15 /* length of fs type name, not inc. nul */ + MNameLen = 90 /* length of buffer for returned name */ + + MFSTYPENAMELEN = 16 /* length of fs type name including null */ + MAXPATHLEN = 1024 + MNAMELEN = MAXPATHLEN + + SYS_GETFSSTAT64 = 347 +) + +type Fsid struct{ val [2]int32 } /* file system id type */ +type uid_t int32 + +// sys/mount.h +const ( + MntReadOnly = 0x00000001 /* read only filesystem */ + MntSynchronous = 0x00000002 /* filesystem written synchronously */ + MntNoExec = 0x00000004 /* can't exec from filesystem */ + MntNoSuid = 0x00000008 /* don't honor setuid bits on fs */ + MntUnion = 0x00000020 /* union with underlying filesystem */ + MntAsync = 0x00000040 /* filesystem written asynchronously */ + MntSuidDir = 0x00100000 /* special handling of SUID on dirs */ + MntSoftDep = 0x00200000 /* soft updates being done */ + MntNoSymFollow = 0x00400000 /* do not follow symlinks */ + MntGEOMJournal = 0x02000000 /* GEOM journal support enabled */ + MntMultilabel = 0x04000000 /* MAC support for individual objects */ + MntACLs = 0x08000000 /* ACL support enabled */ + MntNoATime = 0x10000000 /* disable update of file access time */ + MntClusterRead = 0x40000000 /* disable cluster read */ + MntClusterWrite = 0x80000000 /* disable cluster write */ + MntNFS4ACLs = 0x00000010 +) + +// https://github.com/golang/go/blob/master/src/syscall/ztypes_darwin_386.go#L82 +type Statfs struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_amd64.go new file mode 100644 index 0000000..ec40a75 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_amd64.go @@ -0,0 +1,58 @@ +// +build darwin +// +build amd64 + +package disk + +const ( + MntWait = 1 + MfsNameLen = 15 /* length of fs type name, not inc. 
nul */ + MNameLen = 90 /* length of buffer for returned name */ + + MFSTYPENAMELEN = 16 /* length of fs type name including null */ + MAXPATHLEN = 1024 + MNAMELEN = MAXPATHLEN + + SYS_GETFSSTAT64 = 347 +) + +type Fsid struct{ val [2]int32 } /* file system id type */ +type uid_t int32 + +// sys/mount.h +const ( + MntReadOnly = 0x00000001 /* read only filesystem */ + MntSynchronous = 0x00000002 /* filesystem written synchronously */ + MntNoExec = 0x00000004 /* can't exec from filesystem */ + MntNoSuid = 0x00000008 /* don't honor setuid bits on fs */ + MntUnion = 0x00000020 /* union with underlying filesystem */ + MntAsync = 0x00000040 /* filesystem written asynchronously */ + MntSuidDir = 0x00100000 /* special handling of SUID on dirs */ + MntSoftDep = 0x00200000 /* soft updates being done */ + MntNoSymFollow = 0x00400000 /* do not follow symlinks */ + MntGEOMJournal = 0x02000000 /* GEOM journal support enabled */ + MntMultilabel = 0x04000000 /* MAC support for individual objects */ + MntACLs = 0x08000000 /* ACL support enabled */ + MntNoATime = 0x10000000 /* disable update of file access time */ + MntClusterRead = 0x40000000 /* disable cluster read */ + MntClusterWrite = 0x80000000 /* disable cluster write */ + MntNFS4ACLs = 0x00000010 +) + +type Statfs struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_arm64.go new file mode 100644 index 0000000..0e3f670 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_arm64.go @@ -0,0 +1,58 @@ +// +build darwin +// +build arm64 + +package disk + +const ( + MntWait = 1 + MfsNameLen = 15 /* length of fs type name, not inc. 
nul */ + MNameLen = 90 /* length of buffer for returned name */ + + MFSTYPENAMELEN = 16 /* length of fs type name including null */ + MAXPATHLEN = 1024 + MNAMELEN = MAXPATHLEN + + SYS_GETFSSTAT64 = 347 +) + +type Fsid struct{ val [2]int32 } /* file system id type */ +type uid_t int32 + +// sys/mount.h +const ( + MntReadOnly = 0x00000001 /* read only filesystem */ + MntSynchronous = 0x00000002 /* filesystem written synchronously */ + MntNoExec = 0x00000004 /* can't exec from filesystem */ + MntNoSuid = 0x00000008 /* don't honor setuid bits on fs */ + MntUnion = 0x00000020 /* union with underlying filesystem */ + MntAsync = 0x00000040 /* filesystem written asynchronously */ + MntSuidDir = 0x00100000 /* special handling of SUID on dirs */ + MntSoftDep = 0x00200000 /* soft updates being done */ + MntNoSymFollow = 0x00400000 /* do not follow symlinks */ + MntGEOMJournal = 0x02000000 /* GEOM journal support enabled */ + MntMultilabel = 0x04000000 /* MAC support for individual objects */ + MntACLs = 0x08000000 /* ACL support enabled */ + MntNoATime = 0x10000000 /* disable update of file access time */ + MntClusterRead = 0x40000000 /* disable cluster read */ + MntClusterWrite = 0x80000000 /* disable cluster write */ + MntNFS4ACLs = 0x00000010 +) + +type Statfs struct { + Bsize uint32 + Iosize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Owner uint32 + Type uint32 + Flags uint32 + Fssubtype uint32 + Fstypename [16]int8 + Mntonname [1024]int8 + Mntfromname [1024]int8 + Reserved [8]uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go new file mode 100644 index 0000000..480e237 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_cgo.go @@ -0,0 +1,95 @@ +// +build darwin +// +build cgo + +package disk + +/* +#cgo LDFLAGS: -lobjc -framework Foundation -framework IOKit +#include + +// ### enough? +const int MAX_DISK_NAME = 100; + +typedef struct +{ + char DiskName[MAX_DISK_NAME]; + int64_t Reads; + int64_t Writes; + int64_t ReadBytes; + int64_t WriteBytes; + int64_t ReadTime; + int64_t WriteTime; +} DiskInfo; + +#include "disk_darwin.h" +*/ +import "C" + +import ( + "context" + "errors" + "strings" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + if C.StartIOCounterFetch() == 0 { + return nil, errors.New("Unable to fetch disk list") + } + + // Clean up when we are done. + defer C.EndIOCounterFetch() + ret := make(map[string]IOCountersStat, 0) + + for { + res := C.FetchNextDisk() + if res == -1 { + return nil, errors.New("Unable to fetch disk information") + } else if res == 0 { + break // done + } + + di := C.DiskInfo{} + if C.ReadDiskInfo((*C.DiskInfo)(unsafe.Pointer(&di))) == -1 { + return nil, errors.New("Unable to fetch disk properties") + } + + // Used to only get the necessary part of the C string. + isRuneNull := func(r rune) bool { + return r == '\u0000' + } + + // Map from the darwin-specific C struct to the Go type + // + // ### missing: IopsInProgress, WeightedIO, MergedReadCount, + // MergedWriteCount, SerialNumber + // IOKit can give us at least the serial number I think... 
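+		// The counters below come from the IOBlockStorageDriver statistics
+		// dictionary that ReadDiskInfo copied into the C DiskInfo struct;
+		// ReadTime/WriteTime were already converted from ns to ms there.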
+ d := IOCountersStat{ + // Note: The Go type wants unsigned values, but CFNumberGetValue + // doesn't appear to be able to give us unsigned values. So, we + // cast, and hope for the best. + ReadBytes: uint64(di.ReadBytes), + WriteBytes: uint64(di.WriteBytes), + ReadCount: uint64(di.Reads), + WriteCount: uint64(di.Writes), + ReadTime: uint64(di.ReadTime), + WriteTime: uint64(di.WriteTime), + IoTime: uint64(di.ReadTime + di.WriteTime), + Name: strings.TrimFunc(C.GoStringN(&di.DiskName[0], C.MAX_DISK_NAME), isRuneNull), + } + + if len(names) > 0 && !common.StringsHas(names, d.Name) { + continue + } + + ret[d.Name] = d + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go new file mode 100644 index 0000000..fe76d83 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_darwin_nocgo.go @@ -0,0 +1,18 @@ +// +build darwin +// +build !cgo + +package disk + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go b/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go new file mode 100644 index 0000000..22eb507 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_fallback.go @@ -0,0 +1,33 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris + +package disk + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) 
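+	// Editorial aside: every platform file in this package exposes the same
+	// pair of entry points; the plain function is a thin wrapper that
+	// supplies context.Background(), so callers that need cancellation or
+	// deadlines should use the *WithContext variant. A minimal, hypothetical
+	// usage sketch (assumes the caller imports this package as "disk" and
+	// the standard "fmt" package):
+	//
+	//	if counters, err := disk.IOCounters(); err == nil {
+	//		for name, io := range counters {
+	//			fmt.Printf("%s: read %d B, wrote %d B\n", name, io.ReadBytes, io.WriteBytes)
+	//		}
+	//	}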
+} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + return []PartitionStat{}, common.ErrNotImplementedError +} + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go new file mode 100644 index 0000000..2e0966a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd.go @@ -0,0 +1,196 @@ +// +build freebsd + +package disk + +import ( + "bytes" + "context" + "encoding/binary" + "path" + "strconv" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/internal/common" +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + + // get length + count, err := unix.Getfsstat(nil, MNT_WAIT) + if err != nil { + return ret, err + } + + fs := make([]Statfs, count) + if _, err = Getfsstat(fs, MNT_WAIT); err != nil { + return ret, err + } + + for _, stat := range fs { + opts := "rw" + if stat.Flags&MNT_RDONLY != 0 { + opts = "ro" + } + if stat.Flags&MNT_SYNCHRONOUS != 0 { + opts += ",sync" + } + if stat.Flags&MNT_NOEXEC != 0 { + opts += ",noexec" + } + if stat.Flags&MNT_NOSUID != 0 { + opts += ",nosuid" + } + if stat.Flags&MNT_UNION != 0 { + opts += ",union" + } + if stat.Flags&MNT_ASYNC != 0 { + opts += ",async" + } + if stat.Flags&MNT_SUIDDIR != 0 { + opts += ",suiddir" + } + if stat.Flags&MNT_SOFTDEP != 0 { + opts += ",softdep" + } + if stat.Flags&MNT_NOSYMFOLLOW != 0 { + opts += ",nosymfollow" + } + if stat.Flags&MNT_GJOURNAL != 0 { + opts += ",gjounalc" + } + if stat.Flags&MNT_MULTILABEL != 0 { + opts += ",multilabel" + } + if stat.Flags&MNT_ACLS != 0 { + opts += ",acls" + } + if stat.Flags&MNT_NOATIME != 0 { + opts += ",noattime" + } + if stat.Flags&MNT_NOCLUSTERR != 0 { + opts += ",nocluster" + } + if stat.Flags&MNT_NOCLUSTERW != 0 { + opts += ",noclusterw" + } + if stat.Flags&MNT_NFS4ACLS != 0 { + opts += ",nfs4acls" + } + + d := PartitionStat{ + Device: common.IntToString(stat.Mntfromname[:]), + Mountpoint: common.IntToString(stat.Mntonname[:]), + Fstype: common.IntToString(stat.Fstypename[:]), + Opts: opts, + } + if all == false { + if !path.IsAbs(d.Device) || !common.PathExists(d.Device) { + continue + } + } + + ret = append(ret, d) + } + + return ret, nil +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + // statinfo->devinfo->devstat + // /usr/include/devinfo.h + ret := make(map[string]IOCountersStat) + + r, err := unix.Sysctl("kern.devstat.all") + if err != nil { + return nil, err + } + buf := []byte(r) + length := len(buf) + + count := int(uint64(length) / uint64(sizeOfDevstat)) + + buf = buf[8:] // devstat.all has version in the head. 
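+	// The remaining bytes are a packed array of devstat records, each
+	// sizeOfDevstat bytes long; the loop below decodes one record at a time
+	// with parseDevstat and skips any record that fails to parse.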
+ // parse buf to Devstat + for i := 0; i < count; i++ { + b := buf[i*sizeOfDevstat : i*sizeOfDevstat+sizeOfDevstat] + d, err := parseDevstat(b) + if err != nil { + continue + } + un := strconv.Itoa(int(d.Unit_number)) + name := common.IntToString(d.Device_name[:]) + un + + if len(names) > 0 && !common.StringsHas(names, name) { + continue + } + + ds := IOCountersStat{ + ReadCount: d.Operations[DEVSTAT_READ], + WriteCount: d.Operations[DEVSTAT_WRITE], + ReadBytes: d.Bytes[DEVSTAT_READ], + WriteBytes: d.Bytes[DEVSTAT_WRITE], + ReadTime: uint64(d.Duration[DEVSTAT_READ].Compute() * 1000), + WriteTime: uint64(d.Duration[DEVSTAT_WRITE].Compute() * 1000), + IoTime: uint64(d.Busy_time.Compute() * 1000), + Name: name, + } + ret[name] = ds + } + + return ret, nil +} + +func (b Bintime) Compute() float64 { + BINTIME_SCALE := 5.42101086242752217003726400434970855712890625e-20 + return float64(b.Sec) + float64(b.Frac)*BINTIME_SCALE +} + +// BT2LD(time) ((long double)(time).sec + (time).frac * BINTIME_SCALE) + +// Getfsstat is borrowed from pkg/syscall/syscall_freebsd.go +// change Statfs_t to Statfs in order to get more information +func Getfsstat(buf []Statfs, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf)) + } + r0, _, e1 := unix.Syscall(unix.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func parseDevstat(buf []byte) (Devstat, error) { + var ds Devstat + br := bytes.NewReader(buf) + // err := binary.Read(br, binary.LittleEndian, &ds) + err := common.Read(br, binary.LittleEndian, &ds) + if err != nil { + return ds, err + } + + return ds, nil +} + +func getFsType(stat unix.Statfs_t) string { + return common.IntToString(stat.Fstypename[:]) +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_386.go b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_386.go new file mode 100644 index 0000000..0b3f536 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_386.go @@ -0,0 +1,112 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package disk + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_UNION = 0x00000020 + MNT_ASYNC = 0x00000040 + MNT_SUIDDIR = 0x00100000 + MNT_SOFTDEP = 0x00200000 + MNT_NOSYMFOLLOW = 0x00400000 + MNT_GJOURNAL = 0x02000000 + MNT_MULTILABEL = 0x04000000 + MNT_ACLS = 0x08000000 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NFS4ACLS = 0x00000010 + + MNT_WAIT = 1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 + MNT_SUSPEND = 4 +) + +const ( + sizeOfDevstat = 0xf0 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + 
Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} +type Fsid struct { + Val [2]int32 +} + +type Devstat struct { + Sequence0 uint32 + Allocated int32 + Start_count uint32 + End_count uint32 + Busy_from Bintime + Dev_links _Ctype_struct___0 + Device_number uint32 + Device_name [16]int8 + Unit_number int32 + Bytes [4]uint64 + Operations [4]uint64 + Duration [4]Bintime + Busy_time Bintime + Creation_time Bintime + Block_size uint32 + Tag_types [3]uint64 + Flags uint32 + Device_type uint32 + Priority uint32 + Id *byte + Sequence1 uint32 +} +type Bintime struct { + Sec int32 + Frac uint64 +} + +type _Ctype_struct___0 struct { + Empty uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_amd64.go new file mode 100644 index 0000000..89b617c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_amd64.go @@ -0,0 +1,115 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package disk + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_UNION = 0x00000020 + MNT_ASYNC = 0x00000040 + MNT_SUIDDIR = 0x00100000 + MNT_SOFTDEP = 0x00200000 + MNT_NOSYMFOLLOW = 0x00400000 + MNT_GJOURNAL = 0x02000000 + MNT_MULTILABEL = 0x04000000 + MNT_ACLS = 0x08000000 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NFS4ACLS = 0x00000010 + + MNT_WAIT = 1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 + MNT_SUSPEND = 4 +) + +const ( + sizeOfDevstat = 0x120 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} +type Fsid struct { + Val [2]int32 +} + +type Devstat struct { + Sequence0 uint32 + Allocated int32 + Start_count uint32 + End_count uint32 + Busy_from Bintime + Dev_links _Ctype_struct___0 + Device_number uint32 + Device_name [16]int8 + Unit_number int32 + Bytes [4]uint64 + Operations [4]uint64 + Duration [4]Bintime + Busy_time Bintime + Creation_time Bintime + Block_size uint32 + Pad_cgo_0 [4]byte + Tag_types [3]uint64 + Flags uint32 + Device_type uint32 + Priority uint32 + Pad_cgo_1 [4]byte + ID *byte + Sequence1 uint32 + Pad_cgo_2 [4]byte +} +type Bintime struct { + Sec int64 + Frac uint64 +} + +type _Ctype_struct___0 struct { + Empty uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_arm.go new file mode 100644 index 0000000..0b3f536 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_freebsd_arm.go @@ -0,0 +1,112 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package disk + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + 
DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_UNION = 0x00000020 + MNT_ASYNC = 0x00000040 + MNT_SUIDDIR = 0x00100000 + MNT_SOFTDEP = 0x00200000 + MNT_NOSYMFOLLOW = 0x00400000 + MNT_GJOURNAL = 0x02000000 + MNT_MULTILABEL = 0x04000000 + MNT_ACLS = 0x08000000 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NFS4ACLS = 0x00000010 + + MNT_WAIT = 1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 + MNT_SUSPEND = 4 +) + +const ( + sizeOfDevstat = 0xf0 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [88]int8 + Mntonname [88]int8 +} +type Fsid struct { + Val [2]int32 +} + +type Devstat struct { + Sequence0 uint32 + Allocated int32 + Start_count uint32 + End_count uint32 + Busy_from Bintime + Dev_links _Ctype_struct___0 + Device_number uint32 + Device_name [16]int8 + Unit_number int32 + Bytes [4]uint64 + Operations [4]uint64 + Duration [4]Bintime + Busy_time Bintime + Creation_time Bintime + Block_size uint32 + Tag_types [3]uint64 + Flags uint32 + Device_type uint32 + Priority uint32 + Id *byte + Sequence1 uint32 +} +type Bintime struct { + Sec int32 + Frac uint64 +} + +type _Ctype_struct___0 struct { + Empty uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_linux.go b/vendor/github.com/shirou/gopsutil/disk/disk_linux.go new file mode 100644 index 0000000..a6e5ee3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_linux.go @@ -0,0 +1,474 @@ +// +build linux + +package disk + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +const ( + SectorSize = 512 +) +const ( + // man statfs + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xADFF + BDEVFS_MAGIC = 0x62646576 + BEFS_SUPER_MAGIC = 0x42465331 + BFS_MAGIC = 0x1BADFACE + BINFMTFS_MAGIC = 0x42494e4d + BTRFS_SUPER_MAGIC = 0x9123683E + CGROUP_SUPER_MAGIC = 0x27e0eb + CIFS_MAGIC_NUMBER = 0xFF534D42 + CODA_SUPER_MAGIC = 0x73757245 + COH_SUPER_MAGIC = 0x012FF7B7 + CRAMFS_MAGIC = 0x28cd3d45 + DEBUGFS_MAGIC = 0x64626720 + DEVFS_SUPER_MAGIC = 0x1373 + DEVPTS_SUPER_MAGIC = 0x1cd1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x00414A53 + EXT_SUPER_MAGIC = 0x137D + EXT2_OLD_SUPER_MAGIC = 0xEF51 + EXT2_SUPER_MAGIC = 0xEF53 + EXT3_SUPER_MAGIC = 0xEF53 + EXT4_SUPER_MAGIC = 0xEF53 + FUSE_SUPER_MAGIC = 0x65735546 + FUTEXFS_SUPER_MAGIC = 0xBAD1DEA + HFS_SUPER_MAGIC = 0x4244 + HOSTFS_SUPER_MAGIC = 0x00c0ffee + HPFS_SUPER_MAGIC = 0xF995E849 + HUGETLBFS_MAGIC = 0x958458f6 + ISOFS_SUPER_MAGIC = 0x9660 + JFFS2_SUPER_MAGIC = 0x72b6 + JFS_SUPER_MAGIC = 0x3153464a + MINIX_SUPER_MAGIC = 0x137F /* orig. 
minix */ + MINIX_SUPER_MAGIC2 = 0x138F /* 30 char minix */ + MINIX2_SUPER_MAGIC = 0x2468 /* minix V2 */ + MINIX2_SUPER_MAGIC2 = 0x2478 /* minix V2, 30 char names */ + MINIX3_SUPER_MAGIC = 0x4d5a /* minix V3 fs, 60 char names */ + MQUEUE_MAGIC = 0x19800202 + MSDOS_SUPER_MAGIC = 0x4d44 + NCP_SUPER_MAGIC = 0x564c + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 + NTFS_SB_MAGIC = 0x5346544e + OCFS2_SUPER_MAGIC = 0x7461636f + OPENPROM_SUPER_MAGIC = 0x9fa1 + PIPEFS_MAGIC = 0x50495045 + PROC_SUPER_MAGIC = 0x9fa0 + PSTOREFS_MAGIC = 0x6165676C + QNX4_SUPER_MAGIC = 0x002f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + REISERFS_SUPER_MAGIC = 0x52654973 + ROMFS_MAGIC = 0x7275 + SELINUX_MAGIC = 0xf97cff8c + SMACK_MAGIC = 0x43415d53 + SMB_SUPER_MAGIC = 0x517B + SOCKFS_MAGIC = 0x534F434B + SQUASHFS_MAGIC = 0x73717368 + SYSFS_MAGIC = 0x62656572 + SYSV2_SUPER_MAGIC = 0x012FF7B6 + SYSV4_SUPER_MAGIC = 0x012FF7B5 + TMPFS_MAGIC = 0x01021994 + UDF_SUPER_MAGIC = 0x15013346 + UFS_MAGIC = 0x00011954 + USBDEVICE_SUPER_MAGIC = 0x9fa2 + V9FS_MAGIC = 0x01021997 + VXFS_SUPER_MAGIC = 0xa501FCF5 + XENFS_SUPER_MAGIC = 0xabba1974 + XENIX_SUPER_MAGIC = 0x012FF7B4 + XFS_SUPER_MAGIC = 0x58465342 + _XIAFS_SUPER_MAGIC = 0x012FD16D + + AFS_SUPER_MAGIC = 0x5346414F + AUFS_SUPER_MAGIC = 0x61756673 + ANON_INODE_FS_SUPER_MAGIC = 0x09041934 + CEPH_SUPER_MAGIC = 0x00C36400 + ECRYPTFS_SUPER_MAGIC = 0xF15F + FAT_SUPER_MAGIC = 0x4006 + FHGFS_SUPER_MAGIC = 0x19830326 + FUSEBLK_SUPER_MAGIC = 0x65735546 + FUSECTL_SUPER_MAGIC = 0x65735543 + GFS_SUPER_MAGIC = 0x1161970 + GPFS_SUPER_MAGIC = 0x47504653 + MTD_INODE_FS_SUPER_MAGIC = 0x11307854 + INOTIFYFS_SUPER_MAGIC = 0x2BAD1DEA + ISOFS_R_WIN_SUPER_MAGIC = 0x4004 + ISOFS_WIN_SUPER_MAGIC = 0x4000 + JFFS_SUPER_MAGIC = 0x07C0 + KAFS_SUPER_MAGIC = 0x6B414653 + LUSTRE_SUPER_MAGIC = 0x0BD00BD0 + NFSD_SUPER_MAGIC = 0x6E667364 + PANFS_SUPER_MAGIC = 0xAAD7AAEA + RPC_PIPEFS_SUPER_MAGIC = 0x67596969 + SECURITYFS_SUPER_MAGIC = 0x73636673 + UFS_BYTESWAPPED_SUPER_MAGIC = 0x54190100 + VMHGFS_SUPER_MAGIC = 0xBACBACBC + VZFS_SUPER_MAGIC = 0x565A4653 + ZFS_SUPER_MAGIC = 0x2FC12FC1 +) + +// coreutils/src/stat.c +var fsTypeMap = map[int64]string{ + ADFS_SUPER_MAGIC: "adfs", /* 0xADF5 local */ + AFFS_SUPER_MAGIC: "affs", /* 0xADFF local */ + AFS_SUPER_MAGIC: "afs", /* 0x5346414F remote */ + ANON_INODE_FS_SUPER_MAGIC: "anon-inode FS", /* 0x09041934 local */ + AUFS_SUPER_MAGIC: "aufs", /* 0x61756673 remote */ + // AUTOFS_SUPER_MAGIC: "autofs", /* 0x0187 local */ + BEFS_SUPER_MAGIC: "befs", /* 0x42465331 local */ + BDEVFS_MAGIC: "bdevfs", /* 0x62646576 local */ + BFS_MAGIC: "bfs", /* 0x1BADFACE local */ + BINFMTFS_MAGIC: "binfmt_misc", /* 0x42494E4D local */ + BTRFS_SUPER_MAGIC: "btrfs", /* 0x9123683E local */ + CEPH_SUPER_MAGIC: "ceph", /* 0x00C36400 remote */ + CGROUP_SUPER_MAGIC: "cgroupfs", /* 0x0027E0EB local */ + CIFS_MAGIC_NUMBER: "cifs", /* 0xFF534D42 remote */ + CODA_SUPER_MAGIC: "coda", /* 0x73757245 remote */ + COH_SUPER_MAGIC: "coh", /* 0x012FF7B7 local */ + CRAMFS_MAGIC: "cramfs", /* 0x28CD3D45 local */ + DEBUGFS_MAGIC: "debugfs", /* 0x64626720 local */ + DEVFS_SUPER_MAGIC: "devfs", /* 0x1373 local */ + DEVPTS_SUPER_MAGIC: "devpts", /* 0x1CD1 local */ + ECRYPTFS_SUPER_MAGIC: "ecryptfs", /* 0xF15F local */ + EFS_SUPER_MAGIC: "efs", /* 0x00414A53 local */ + EXT_SUPER_MAGIC: "ext", /* 0x137D local */ + EXT2_SUPER_MAGIC: "ext2/ext3", /* 0xEF53 local */ + EXT2_OLD_SUPER_MAGIC: "ext2", /* 0xEF51 local */ + FAT_SUPER_MAGIC: "fat", /* 0x4006 local */ + FHGFS_SUPER_MAGIC: "fhgfs", /* 0x19830326 
remote */ + FUSEBLK_SUPER_MAGIC: "fuseblk", /* 0x65735546 remote */ + FUSECTL_SUPER_MAGIC: "fusectl", /* 0x65735543 remote */ + FUTEXFS_SUPER_MAGIC: "futexfs", /* 0x0BAD1DEA local */ + GFS_SUPER_MAGIC: "gfs/gfs2", /* 0x1161970 remote */ + GPFS_SUPER_MAGIC: "gpfs", /* 0x47504653 remote */ + HFS_SUPER_MAGIC: "hfs", /* 0x4244 local */ + HPFS_SUPER_MAGIC: "hpfs", /* 0xF995E849 local */ + HUGETLBFS_MAGIC: "hugetlbfs", /* 0x958458F6 local */ + MTD_INODE_FS_SUPER_MAGIC: "inodefs", /* 0x11307854 local */ + INOTIFYFS_SUPER_MAGIC: "inotifyfs", /* 0x2BAD1DEA local */ + ISOFS_SUPER_MAGIC: "isofs", /* 0x9660 local */ + ISOFS_R_WIN_SUPER_MAGIC: "isofs", /* 0x4004 local */ + ISOFS_WIN_SUPER_MAGIC: "isofs", /* 0x4000 local */ + JFFS_SUPER_MAGIC: "jffs", /* 0x07C0 local */ + JFFS2_SUPER_MAGIC: "jffs2", /* 0x72B6 local */ + JFS_SUPER_MAGIC: "jfs", /* 0x3153464A local */ + KAFS_SUPER_MAGIC: "k-afs", /* 0x6B414653 remote */ + LUSTRE_SUPER_MAGIC: "lustre", /* 0x0BD00BD0 remote */ + MINIX_SUPER_MAGIC: "minix", /* 0x137F local */ + MINIX_SUPER_MAGIC2: "minix (30 char.)", /* 0x138F local */ + MINIX2_SUPER_MAGIC: "minix v2", /* 0x2468 local */ + MINIX2_SUPER_MAGIC2: "minix v2 (30 char.)", /* 0x2478 local */ + MINIX3_SUPER_MAGIC: "minix3", /* 0x4D5A local */ + MQUEUE_MAGIC: "mqueue", /* 0x19800202 local */ + MSDOS_SUPER_MAGIC: "msdos", /* 0x4D44 local */ + NCP_SUPER_MAGIC: "novell", /* 0x564C remote */ + NFS_SUPER_MAGIC: "nfs", /* 0x6969 remote */ + NFSD_SUPER_MAGIC: "nfsd", /* 0x6E667364 remote */ + NILFS_SUPER_MAGIC: "nilfs", /* 0x3434 local */ + NTFS_SB_MAGIC: "ntfs", /* 0x5346544E local */ + OPENPROM_SUPER_MAGIC: "openprom", /* 0x9FA1 local */ + OCFS2_SUPER_MAGIC: "ocfs2", /* 0x7461636f remote */ + PANFS_SUPER_MAGIC: "panfs", /* 0xAAD7AAEA remote */ + PIPEFS_MAGIC: "pipefs", /* 0x50495045 remote */ + PROC_SUPER_MAGIC: "proc", /* 0x9FA0 local */ + PSTOREFS_MAGIC: "pstorefs", /* 0x6165676C local */ + QNX4_SUPER_MAGIC: "qnx4", /* 0x002F local */ + QNX6_SUPER_MAGIC: "qnx6", /* 0x68191122 local */ + RAMFS_MAGIC: "ramfs", /* 0x858458F6 local */ + REISERFS_SUPER_MAGIC: "reiserfs", /* 0x52654973 local */ + ROMFS_MAGIC: "romfs", /* 0x7275 local */ + RPC_PIPEFS_SUPER_MAGIC: "rpc_pipefs", /* 0x67596969 local */ + SECURITYFS_SUPER_MAGIC: "securityfs", /* 0x73636673 local */ + SELINUX_MAGIC: "selinux", /* 0xF97CFF8C local */ + SMB_SUPER_MAGIC: "smb", /* 0x517B remote */ + SOCKFS_MAGIC: "sockfs", /* 0x534F434B local */ + SQUASHFS_MAGIC: "squashfs", /* 0x73717368 local */ + SYSFS_MAGIC: "sysfs", /* 0x62656572 local */ + SYSV2_SUPER_MAGIC: "sysv2", /* 0x012FF7B6 local */ + SYSV4_SUPER_MAGIC: "sysv4", /* 0x012FF7B5 local */ + TMPFS_MAGIC: "tmpfs", /* 0x01021994 local */ + UDF_SUPER_MAGIC: "udf", /* 0x15013346 local */ + UFS_MAGIC: "ufs", /* 0x00011954 local */ + UFS_BYTESWAPPED_SUPER_MAGIC: "ufs", /* 0x54190100 local */ + USBDEVICE_SUPER_MAGIC: "usbdevfs", /* 0x9FA2 local */ + V9FS_MAGIC: "v9fs", /* 0x01021997 local */ + VMHGFS_SUPER_MAGIC: "vmhgfs", /* 0xBACBACBC remote */ + VXFS_SUPER_MAGIC: "vxfs", /* 0xA501FCF5 local */ + VZFS_SUPER_MAGIC: "vzfs", /* 0x565A4653 local */ + XENFS_SUPER_MAGIC: "xenfs", /* 0xABBA1974 local */ + XENIX_SUPER_MAGIC: "xenix", /* 0x012FF7B4 local */ + XFS_SUPER_MAGIC: "xfs", /* 0x58465342 local */ + _XIAFS_SUPER_MAGIC: "xia", /* 0x012FD16D local */ + ZFS_SUPER_MAGIC: "zfs", /* 0x2FC12FC1 local */ +} + +// Partitions returns disk partitions. If all is false, returns +// physical devices only (e.g. hard disks, cd-rom drives, USB keys) +// and ignore all others (e.g. 
memory partitions such as /dev/shm) +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + filename := common.HostProc("self/mountinfo") + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + fs, err := getFileSystems() + if err != nil { + return nil, err + } + + ret := make([]PartitionStat, 0, len(lines)) + + for _, line := range lines { + // a line of self/mountinfo has the following structure: + // 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + // (1) (2) (3) (4) (5) (6) (7) (8) (9) (10) (11) + + // split the mountinfo line by the separator hyphen + parts := strings.Split(line, " - ") + if len(parts) != 2 { + return nil, fmt.Errorf("found invalid mountinfo line in file %s: %s ", filename, line) + } + + fields := strings.Fields(parts[0]) + blockDeviceID := fields[2] + mountPoint := fields[4] + mountOpts := fields[5] + + fields = strings.Fields(parts[1]) + fstype := fields[0] + device := fields[1] + + d := PartitionStat{ + Device: device, + Mountpoint: mountPoint, + Fstype: fstype, + Opts: mountOpts, + } + if all == false { + if d.Device == "none" || !common.StringsHas(fs, d.Fstype) { + continue + } + } + // /dev/root is not the real device name + // so we get the real device name from its major/minor number + if d.Device == "/dev/root" { + devpath, err := os.Readlink(common.HostSys("/dev/block/" + blockDeviceID)) + if err != nil { + return nil, err + } + d.Device = strings.Replace(d.Device, "root", filepath.Base(devpath), 1) + } + ret = append(ret, d) + } + + return ret, nil +} + +// getFileSystems returns supported filesystems from /proc/filesystems +func getFileSystems() ([]string, error) { + filename := common.HostProc("filesystems") + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + var ret []string + for _, line := range lines { + if !strings.HasPrefix(line, "nodev") { + ret = append(ret, strings.TrimSpace(line)) + continue + } + t := strings.Split(line, "\t") + if len(t) != 2 || t[1] != "zfs" { + continue + } + ret = append(ret, strings.TrimSpace(t[1])) + } + + return ret, nil +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + filename := common.HostProc("diskstats") + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + ret := make(map[string]IOCountersStat, 0) + empty := IOCountersStat{} + + // use only basename such as "/dev/sda1" to "sda1" + for i, name := range names { + names[i] = filepath.Base(name) + } + + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) < 14 { + // malformed line in /proc/diskstats, avoid panic by ignoring. 
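+			// A well-formed /proc/diskstats line has at least 14 fields:
+			// major, minor, device name, then eleven I/O counters.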
+ continue + } + name := fields[2] + + if len(names) > 0 && !common.StringsHas(names, name) { + continue + } + + reads, err := strconv.ParseUint((fields[3]), 10, 64) + if err != nil { + return ret, err + } + mergedReads, err := strconv.ParseUint((fields[4]), 10, 64) + if err != nil { + return ret, err + } + rbytes, err := strconv.ParseUint((fields[5]), 10, 64) + if err != nil { + return ret, err + } + rtime, err := strconv.ParseUint((fields[6]), 10, 64) + if err != nil { + return ret, err + } + writes, err := strconv.ParseUint((fields[7]), 10, 64) + if err != nil { + return ret, err + } + mergedWrites, err := strconv.ParseUint((fields[8]), 10, 64) + if err != nil { + return ret, err + } + wbytes, err := strconv.ParseUint((fields[9]), 10, 64) + if err != nil { + return ret, err + } + wtime, err := strconv.ParseUint((fields[10]), 10, 64) + if err != nil { + return ret, err + } + iopsInProgress, err := strconv.ParseUint((fields[11]), 10, 64) + if err != nil { + return ret, err + } + iotime, err := strconv.ParseUint((fields[12]), 10, 64) + if err != nil { + return ret, err + } + weightedIO, err := strconv.ParseUint((fields[13]), 10, 64) + if err != nil { + return ret, err + } + d := IOCountersStat{ + ReadBytes: rbytes * SectorSize, + WriteBytes: wbytes * SectorSize, + ReadCount: reads, + WriteCount: writes, + MergedReadCount: mergedReads, + MergedWriteCount: mergedWrites, + ReadTime: rtime, + WriteTime: wtime, + IopsInProgress: iopsInProgress, + IoTime: iotime, + WeightedIO: weightedIO, + } + if d == empty { + continue + } + d.Name = name + + d.SerialNumber = GetDiskSerialNumber(name) + d.Label = GetLabel(name) + + ret[name] = d + } + return ret, nil +} + +// GetDiskSerialNumber returns Serial Number of given device or empty string +// on error. Name of device is expected, eg. /dev/sda +func GetDiskSerialNumber(name string) string { + return GetDiskSerialNumberWithContext(context.Background(), name) +} + +func GetDiskSerialNumberWithContext(ctx context.Context, name string) string { + var stat unix.Stat_t + err := unix.Stat(name, &stat) + if err != nil { + return "" + } + major := unix.Major(uint64(stat.Rdev)) + minor := unix.Minor(uint64(stat.Rdev)) + + // Try to get the serial from udev data + udevDataPath := common.HostRun(fmt.Sprintf("udev/data/b%d:%d", major, minor)) + if udevdata, err := ioutil.ReadFile(udevDataPath); err == nil { + scanner := bufio.NewScanner(bytes.NewReader(udevdata)) + for scanner.Scan() { + values := strings.Split(scanner.Text(), "=") + if len(values) == 2 && values[0] == "E:ID_SERIAL" { + return values[1] + } + } + } + + // Try to get the serial from sysfs, look at the disk device (minor 0) directly + // because if it is a partition it is not going to contain any device information + devicePath := common.HostSys(fmt.Sprintf("dev/block/%d:0/device", major)) + model, _ := ioutil.ReadFile(filepath.Join(devicePath, "model")) + serial, _ := ioutil.ReadFile(filepath.Join(devicePath, "serial")) + if len(model) > 0 && len(serial) > 0 { + return fmt.Sprintf("%s_%s", string(model), string(serial)) + } + return "" +} + +// GetLabel returns label of given device or empty string on error. +// Name of device is expected, eg. 
/dev/sda +// Supports label based on devicemapper name +// See https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-block-dm +func GetLabel(name string) string { + // Try label based on devicemapper name + dmname_filename := common.HostSys(fmt.Sprintf("block/%s/dm/name", name)) + + if !common.PathExists(dmname_filename) { + return "" + } + + dmname, err := ioutil.ReadFile(dmname_filename) + if err != nil { + return "" + } else { + return strings.TrimSpace(string(dmname)) + } +} + +func getFsType(stat unix.Statfs_t) string { + t := int64(stat.Type) + ret, ok := fsTypeMap[t] + if !ok { + return "" + } + return ret +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go new file mode 100644 index 0000000..6fdf386 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd.go @@ -0,0 +1,181 @@ +// +build openbsd + +package disk + +import ( + "bytes" + "context" + "encoding/binary" + "path" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + + // get length + count, err := unix.Getfsstat(nil, MNT_WAIT) + if err != nil { + return ret, err + } + + fs := make([]Statfs, count) + if _, err = Getfsstat(fs, MNT_WAIT); err != nil { + return ret, err + } + + for _, stat := range fs { + opts := "rw" + if stat.F_flags&MNT_RDONLY != 0 { + opts = "ro" + } + if stat.F_flags&MNT_SYNCHRONOUS != 0 { + opts += ",sync" + } + if stat.F_flags&MNT_NOEXEC != 0 { + opts += ",noexec" + } + if stat.F_flags&MNT_NOSUID != 0 { + opts += ",nosuid" + } + if stat.F_flags&MNT_NODEV != 0 { + opts += ",nodev" + } + if stat.F_flags&MNT_ASYNC != 0 { + opts += ",async" + } + + d := PartitionStat{ + Device: common.IntToString(stat.F_mntfromname[:]), + Mountpoint: common.IntToString(stat.F_mntonname[:]), + Fstype: common.IntToString(stat.F_fstypename[:]), + Opts: opts, + } + if all == false { + if !path.IsAbs(d.Device) || !common.PathExists(d.Device) { + continue + } + } + + ret = append(ret, d) + } + + return ret, nil +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) 
+} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + ret := make(map[string]IOCountersStat) + + r, err := unix.SysctlRaw("hw.diskstats") + if err != nil { + return nil, err + } + buf := []byte(r) + length := len(buf) + + count := int(uint64(length) / uint64(sizeOfDiskstats)) + + // parse buf to Diskstats + for i := 0; i < count; i++ { + b := buf[i*sizeOfDiskstats : i*sizeOfDiskstats+sizeOfDiskstats] + d, err := parseDiskstats(b) + if err != nil { + continue + } + name := common.IntToString(d.Name[:]) + + if len(names) > 0 && !common.StringsHas(names, name) { + continue + } + + ds := IOCountersStat{ + ReadCount: d.Rxfer, + WriteCount: d.Wxfer, + ReadBytes: d.Rbytes, + WriteBytes: d.Wbytes, + Name: name, + } + ret[name] = ds + } + + return ret, nil +} + +// BT2LD(time) ((long double)(time).sec + (time).frac * BINTIME_SCALE) + +// Getfsstat is borrowed from pkg/syscall/syscall_freebsd.go +// change Statfs_t to Statfs in order to get more information +func Getfsstat(buf []Statfs, flags int) (n int, err error) { + return GetfsstatWithContext(context.Background(), buf, flags) +} + +func GetfsstatWithContext(ctx context.Context, buf []Statfs, flags int) (n int, err error) { + var _p0 unsafe.Pointer + var bufsize uintptr + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf)) + } + r0, _, e1 := unix.Syscall(unix.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func parseDiskstats(buf []byte) (Diskstats, error) { + var ds Diskstats + br := bytes.NewReader(buf) + // err := binary.Read(br, binary.LittleEndian, &ds) + err := common.Read(br, binary.LittleEndian, &ds) + if err != nil { + return ds, err + } + + return ds, nil +} + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + stat := unix.Statfs_t{} + err := unix.Statfs(path, &stat) + if err != nil { + return nil, err + } + bsize := stat.F_bsize + + ret := &UsageStat{ + Path: path, + Fstype: getFsType(stat), + Total: (uint64(stat.F_blocks) * uint64(bsize)), + Free: (uint64(stat.F_bavail) * uint64(bsize)), + InodesTotal: (uint64(stat.F_files)), + InodesFree: (uint64(stat.F_ffree)), + } + + ret.InodesUsed = (ret.InodesTotal - ret.InodesFree) + ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0 + ret.Used = (uint64(stat.F_blocks) - uint64(stat.F_bfree)) * uint64(bsize) + ret.UsedPercent = (float64(ret.Used) / float64(ret.Total)) * 100.0 + + return ret, nil +} + +func getFsType(stat unix.Statfs_t) string { + return common.IntToString(stat.F_fstypename[:]) +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_amd64.go new file mode 100644 index 0000000..07a845f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_openbsd_amd64.go @@ -0,0 +1,91 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package disk + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeofLongDouble = 0x8 + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + MNT_RDONLY = 0x00000001 + MNT_SYNCHRONOUS = 0x00000002 + MNT_NOEXEC = 0x00000004 + MNT_NOSUID = 0x00000008 + MNT_NODEV = 0x00000010 + MNT_ASYNC = 0x00000040 + + MNT_WAIT = 
1 + MNT_NOWAIT = 2 + MNT_LAZY = 3 +) + +const ( + sizeOfDiskstats = 0x70 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 + _C_long_double int64 +) + +type Statfs struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + Pad_cgo_0 [4]byte + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + Pad_cgo_1 [2]byte + Mount_info [160]byte +} +type Diskstats struct { + Name [16]int8 + Busy int32 + Pad_cgo_0 [4]byte + Rxfer uint64 + Wxfer uint64 + Seek uint64 + Rbytes uint64 + Wbytes uint64 + Attachtime Timeval + Timestamp Timeval + Time Timeval +} +type Fsid struct { + Val [2]int32 +} +type Timeval struct { + Sec int64 + Usec int64 +} + +type Diskstat struct{} +type Bintime struct{} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go b/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go new file mode 100644 index 0000000..c660835 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_solaris.go @@ -0,0 +1,127 @@ +// +build solaris + +package disk + +import ( + "bufio" + "context" + "fmt" + "math" + "os" + "strings" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +const ( + // _DEFAULT_NUM_MOUNTS is set to `cat /etc/mnttab | wc -l` rounded up to the + // nearest power of two. + _DEFAULT_NUM_MOUNTS = 32 + + // _MNTTAB default place to read mount information + _MNTTAB = "/etc/mnttab" +) + +var ( + // A blacklist of read-only virtual filesystems. Writable filesystems are of + // operational concern and must not be included in this list. + fsTypeBlacklist = map[string]struct{}{ + "ctfs": struct{}{}, + "dev": struct{}{}, + "fd": struct{}{}, + "lofs": struct{}{}, + "lxproc": struct{}{}, + "mntfs": struct{}{}, + "objfs": struct{}{}, + "proc": struct{}{}, + } +) + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + ret := make([]PartitionStat, 0, _DEFAULT_NUM_MOUNTS) + + // Scan mnttab(4) + f, err := os.Open(_MNTTAB) + if err != nil { + } + defer func() { + if err == nil { + err = f.Close() + } else { + f.Close() + } + }() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := strings.Split(scanner.Text(), "\t") + + if _, found := fsTypeBlacklist[fields[2]]; found { + continue + } + + ret = append(ret, PartitionStat{ + // NOTE(seanc@): Device isn't exactly accurate: from mnttab(4): "The name + // of the resource that has been mounted." Ideally this value would come + // from Statvfs_t.Fsid but I'm leaving it to the caller to traverse + // unix.Statvfs(). + Device: fields[0], + Mountpoint: fields[1], + Fstype: fields[2], + Opts: fields[3], + }) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("unable to scan %q: %v", _MNTTAB, err) + } + + return ret, err +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) 
+} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + statvfs := unix.Statvfs_t{} + if err := unix.Statvfs(path, &statvfs); err != nil { + return nil, fmt.Errorf("unable to call statvfs(2) on %q: %v", path, err) + } + + usageStat := &UsageStat{ + Path: path, + Fstype: common.IntToString(statvfs.Basetype[:]), + Total: statvfs.Blocks * statvfs.Frsize, + Free: statvfs.Bfree * statvfs.Frsize, + Used: (statvfs.Blocks - statvfs.Bfree) * statvfs.Frsize, + + // NOTE: ZFS (and FreeBZSD's UFS2) use dynamic inode/dnode allocation. + // Explicitly return a near-zero value for InodesUsedPercent so that nothing + // attempts to garbage collect based on a lack of available inodes/dnodes. + // Similarly, don't use the zero value to prevent divide-by-zero situations + // and inject a faux near-zero value. Filesystems evolve. Has your + // filesystem evolved? Probably not if you care about the number of + // available inodes. + InodesTotal: 1024.0 * 1024.0, + InodesUsed: 1024.0, + InodesFree: math.MaxUint64, + InodesUsedPercent: (1024.0 / (1024.0 * 1024.0)) * 100.0, + } + + usageStat.UsedPercent = (float64(usageStat.Used) / float64(usageStat.Total)) * 100.0 + + return usageStat, nil +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_unix.go b/vendor/github.com/shirou/gopsutil/disk/disk_unix.go new file mode 100644 index 0000000..9b499b5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_unix.go @@ -0,0 +1,68 @@ +// +build freebsd linux darwin + +package disk + +import ( + "context" + "strconv" + + "golang.org/x/sys/unix" +) + +// Usage returns a file system usage. path is a filessytem path such +// as "/", not device file path like "/dev/vda1". If you want to use +// a return value of disk.Partitions, use "Mountpoint" not "Device". +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + stat := unix.Statfs_t{} + err := unix.Statfs(path, &stat) + if err != nil { + return nil, err + } + bsize := stat.Bsize + + ret := &UsageStat{ + Path: unescapeFstab(path), + Fstype: getFsType(stat), + Total: (uint64(stat.Blocks) * uint64(bsize)), + Free: (uint64(stat.Bavail) * uint64(bsize)), + InodesTotal: (uint64(stat.Files)), + InodesFree: (uint64(stat.Ffree)), + } + + // if could not get InodesTotal, return empty + if ret.InodesTotal < ret.InodesFree { + return ret, nil + } + + ret.InodesUsed = (ret.InodesTotal - ret.InodesFree) + ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize) + + if ret.InodesTotal == 0 { + ret.InodesUsedPercent = 0 + } else { + ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0 + } + + if (ret.Used + ret.Free) == 0 { + ret.UsedPercent = 0 + } else { + // We don't use ret.Total to calculate percent. 
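+		// Used+Free deliberately excludes the blocks reserved for root
+		// (Bfree-Bavail), so the percentage matches what df(1) reports.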
+ // see https://github.com/shirou/gopsutil/issues/562 + ret.UsedPercent = (float64(ret.Used) / float64(ret.Used+ret.Free)) * 100.0 + } + + return ret, nil +} + +// Unescape escaped octal chars (like space 040, ampersand 046 and backslash 134) to their real value in fstab fields issue#555 +func unescapeFstab(path string) string { + escaped, err := strconv.Unquote(`"` + path + `"`) + if err != nil { + return path + } + return escaped +} diff --git a/vendor/github.com/shirou/gopsutil/disk/disk_windows.go b/vendor/github.com/shirou/gopsutil/disk/disk_windows.go new file mode 100644 index 0000000..081443b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/disk_windows.go @@ -0,0 +1,168 @@ +// +build windows + +package disk + +import ( + "bytes" + "context" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" +) + +var ( + procGetDiskFreeSpaceExW = common.Modkernel32.NewProc("GetDiskFreeSpaceExW") + procGetLogicalDriveStringsW = common.Modkernel32.NewProc("GetLogicalDriveStringsW") + procGetDriveType = common.Modkernel32.NewProc("GetDriveTypeW") + procGetVolumeInformation = common.Modkernel32.NewProc("GetVolumeInformationW") +) + +var ( + FileFileCompression = int64(16) // 0x00000010 + FileReadOnlyVolume = int64(524288) // 0x00080000 +) + +type Win32_PerfFormattedData struct { + Name string + AvgDiskBytesPerRead uint64 + AvgDiskBytesPerWrite uint64 + AvgDiskReadQueueLength uint64 + AvgDiskWriteQueueLength uint64 + AvgDisksecPerRead uint64 + AvgDisksecPerWrite uint64 +} + +const WaitMSec = 500 + +func Usage(path string) (*UsageStat, error) { + return UsageWithContext(context.Background(), path) +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + ret := &UsageStat{} + + lpFreeBytesAvailable := int64(0) + lpTotalNumberOfBytes := int64(0) + lpTotalNumberOfFreeBytes := int64(0) + diskret, _, err := procGetDiskFreeSpaceExW.Call( + uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(path))), + uintptr(unsafe.Pointer(&lpFreeBytesAvailable)), + uintptr(unsafe.Pointer(&lpTotalNumberOfBytes)), + uintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes))) + if diskret == 0 { + return nil, err + } + ret = &UsageStat{ + Path: path, + Total: uint64(lpTotalNumberOfBytes), + Free: uint64(lpTotalNumberOfFreeBytes), + Used: uint64(lpTotalNumberOfBytes) - uint64(lpTotalNumberOfFreeBytes), + UsedPercent: (float64(lpTotalNumberOfBytes) - float64(lpTotalNumberOfFreeBytes)) / float64(lpTotalNumberOfBytes) * 100, + // InodesTotal: 0, + // InodesFree: 0, + // InodesUsed: 0, + // InodesUsedPercent: 0, + } + return ret, nil +} + +func Partitions(all bool) ([]PartitionStat, error) { + return PartitionsWithContext(context.Background(), all) +} + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + lpBuffer := make([]byte, 254) + diskret, _, err := procGetLogicalDriveStringsW.Call( + uintptr(len(lpBuffer)), + uintptr(unsafe.Pointer(&lpBuffer[0]))) + if diskret == 0 { + return ret, err + } + for _, v := range lpBuffer { + if v >= 65 && v <= 90 { + path := string(v) + ":" + typepath, _ := windows.UTF16PtrFromString(path) + typeret, _, _ := procGetDriveType.Call(uintptr(unsafe.Pointer(typepath))) + if typeret == 0 { + return ret, windows.GetLastError() + } + // 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 4: DRIVE_REMOTE 5: DRIVE_CDROM + + if typeret == 2 || typeret == 3 || typeret == 4 || typeret == 5 { + lpVolumeNameBuffer := make([]byte, 256) + lpVolumeSerialNumber := int64(0) + lpMaximumComponentLength := 
int64(0) + lpFileSystemFlags := int64(0) + lpFileSystemNameBuffer := make([]byte, 256) + volpath, _ := windows.UTF16PtrFromString(string(v) + ":/") + driveret, _, err := procGetVolumeInformation.Call( + uintptr(unsafe.Pointer(volpath)), + uintptr(unsafe.Pointer(&lpVolumeNameBuffer[0])), + uintptr(len(lpVolumeNameBuffer)), + uintptr(unsafe.Pointer(&lpVolumeSerialNumber)), + uintptr(unsafe.Pointer(&lpMaximumComponentLength)), + uintptr(unsafe.Pointer(&lpFileSystemFlags)), + uintptr(unsafe.Pointer(&lpFileSystemNameBuffer[0])), + uintptr(len(lpFileSystemNameBuffer))) + if driveret == 0 { + if typeret == 5 || typeret == 2 { + continue //device is not ready will happen if there is no disk in the drive + } + return ret, err + } + opts := "rw" + if lpFileSystemFlags&FileReadOnlyVolume != 0 { + opts = "ro" + } + if lpFileSystemFlags&FileFileCompression != 0 { + opts += ".compress" + } + + d := PartitionStat{ + Mountpoint: path, + Device: path, + Fstype: string(bytes.Replace(lpFileSystemNameBuffer, []byte("\x00"), []byte(""), -1)), + Opts: opts, + } + ret = append(ret, d) + } + } + } + return ret, nil +} + +func IOCounters(names ...string) (map[string]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), names...) +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + ret := make(map[string]IOCountersStat, 0) + var dst []Win32_PerfFormattedData + + err := common.WMIQueryWithContext(ctx, "SELECT * FROM Win32_PerfFormattedData_PerfDisk_LogicalDisk", &dst) + if err != nil { + return ret, err + } + for _, d := range dst { + if len(d.Name) > 3 { // not get _Total or Harddrive + continue + } + + if len(names) > 0 && !common.StringsHas(names, d.Name) { + continue + } + + ret[d.Name] = IOCountersStat{ + Name: d.Name, + ReadCount: uint64(d.AvgDiskReadQueueLength), + WriteCount: d.AvgDiskWriteQueueLength, + ReadBytes: uint64(d.AvgDiskBytesPerRead), + WriteBytes: uint64(d.AvgDiskBytesPerWrite), + ReadTime: d.AvgDisksecPerRead, + WriteTime: d.AvgDisksecPerWrite, + } + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go b/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go new file mode 100644 index 0000000..dd6ddc4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/types_freebsd.go @@ -0,0 +1,88 @@ +// +build ignore +// Hand writing: _Ctype_struct___0 + +/* +Input to cgo -godefs. + +*/ + +package disk + +/* +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +// because statinfo has long double snap_time, redefine with changing long long +struct statinfo2 { + long cp_time[CPUSTATES]; + long tk_nin; + long tk_nout; + struct devinfo *dinfo; + long long snap_time; +}; +*/ +import "C" + +// Machine characteristics; for internal use. 
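+// Note: this file carries the "+build ignore" tag above, so it never compiles
+// into the package; it exists only as the input that `cgo -godefs` reads to
+// generate the concrete FreeBSD definitions (hence the C.sizeof_* references
+// in the constants below).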
+ +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + sizeofLongDouble = C.sizeof_longlong + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + // from sys/mount.h + MNT_RDONLY = 0x00000001 /* read only filesystem */ + MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */ + MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ + MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ + MNT_UNION = 0x00000020 /* union with underlying filesystem */ + MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */ + MNT_SUIDDIR = 0x00100000 /* special handling of SUID on dirs */ + MNT_SOFTDEP = 0x00200000 /* soft updates being done */ + MNT_NOSYMFOLLOW = 0x00400000 /* do not follow symlinks */ + MNT_GJOURNAL = 0x02000000 /* GEOM journal support enabled */ + MNT_MULTILABEL = 0x04000000 /* MAC support for individual objects */ + MNT_ACLS = 0x08000000 /* ACL support enabled */ + MNT_NOATIME = 0x10000000 /* disable update of file access time */ + MNT_NOCLUSTERR = 0x40000000 /* disable cluster read */ + MNT_NOCLUSTERW = 0x80000000 /* disable cluster write */ + MNT_NFS4ACLS = 0x00000010 + + MNT_WAIT = 1 /* synchronously wait for I/O to complete */ + MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */ + MNT_LAZY = 3 /* push data not written by filesystem syncer */ + MNT_SUSPEND = 4 /* Suspend file system after sync */ +) + +const ( + sizeOfDevstat = C.sizeof_struct_devstat +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong + _C_long_double C.longlong +) + +type Statfs C.struct_statfs +type Fsid C.struct_fsid + +type Devstat C.struct_devstat +type Bintime C.struct_bintime diff --git a/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go b/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go new file mode 100644 index 0000000..1e3ddef --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/disk/types_openbsd.go @@ -0,0 +1,70 @@ +// +build ignore +// Hand writing: _Ctype_struct___0 + +/* +Input to cgo -godefs. +*/ + +package disk + +/* +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +*/ +import "C" + +// Machine characteristics; for internal use. 
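+// Note: like the FreeBSD variant, this build-ignored file is only the
+// `cgo -godefs` input for OpenBSD; the Go types further below (Statfs,
+// Diskstats, Bintime) are resolved against the C headers included above.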
+ +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + sizeofLongDouble = C.sizeof_longlong + + DEVSTAT_NO_DATA = 0x00 + DEVSTAT_READ = 0x01 + DEVSTAT_WRITE = 0x02 + DEVSTAT_FREE = 0x03 + + // from sys/mount.h + MNT_RDONLY = 0x00000001 /* read only filesystem */ + MNT_SYNCHRONOUS = 0x00000002 /* filesystem written synchronously */ + MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ + MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ + MNT_NODEV = 0x00000010 /* don't interpret special files */ + MNT_ASYNC = 0x00000040 /* filesystem written asynchronously */ + + MNT_WAIT = 1 /* synchronously wait for I/O to complete */ + MNT_NOWAIT = 2 /* start all I/O, but do not wait for it */ + MNT_LAZY = 3 /* push data not written by filesystem syncer */ +) + +const ( + sizeOfDiskstats = C.sizeof_struct_diskstats +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong + _C_long_double C.longlong +) + +type Statfs C.struct_statfs +type Diskstats C.struct_diskstats +type Fsid C.fsid_t +type Timeval C.struct_timeval + +type Diskstat C.struct_diskstat +type Bintime C.struct_bintime diff --git a/vendor/github.com/shirou/gopsutil/host/host.go b/vendor/github.com/shirou/gopsutil/host/host.go new file mode 100644 index 0000000..1e9e9bb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host.go @@ -0,0 +1,53 @@ +package host + +import ( + "encoding/json" + + "github.com/shirou/gopsutil/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +// A HostInfoStat describes the host status. +// This is not in the psutil but it useful. +type InfoStat struct { + Hostname string `json:"hostname"` + Uptime uint64 `json:"uptime"` + BootTime uint64 `json:"bootTime"` + Procs uint64 `json:"procs"` // number of processes + OS string `json:"os"` // ex: freebsd, linux + Platform string `json:"platform"` // ex: ubuntu, linuxmint + PlatformFamily string `json:"platformFamily"` // ex: debian, rhel + PlatformVersion string `json:"platformVersion"` // version of the complete OS + KernelVersion string `json:"kernelVersion"` // version of the OS kernel (if available) + VirtualizationSystem string `json:"virtualizationSystem"` + VirtualizationRole string `json:"virtualizationRole"` // guest or host + HostID string `json:"hostid"` // ex: uuid +} + +type UserStat struct { + User string `json:"user"` + Terminal string `json:"terminal"` + Host string `json:"host"` + Started int `json:"started"` +} + +type TemperatureStat struct { + SensorKey string `json:"sensorKey"` + Temperature float64 `json:"sensorTemperature"` +} + +func (h InfoStat) String() string { + s, _ := json.Marshal(h) + return string(s) +} + +func (u UserStat) String() string { + s, _ := json.Marshal(u) + return string(s) +} + +func (t TemperatureStat) String() string { + s, _ := json.Marshal(t) + return string(s) +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_darwin.go b/vendor/github.com/shirou/gopsutil/host/host_darwin.go new file mode 100644 index 0000000..5cf22b7 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_darwin.go @@ -0,0 +1,221 @@ +// +build darwin + +package host + +import ( + "bytes" + "context" + "encoding/binary" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "sync/atomic" + "time" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/process" + "golang.org/x/sys/unix" +) + +// 
from utmpx.h +const USER_PROCESS = 7 + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + PlatformFamily: "darwin", + } + + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + + kernelVersion, err := KernelVersionWithContext(ctx) + if err == nil { + ret.KernelVersion = kernelVersion + } + + platform, family, pver, err := PlatformInformation() + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = pver + } + + system, role, err := Virtualization() + if err == nil { + ret.VirtualizationSystem = system + ret.VirtualizationRole = role + } + + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime = uptime(boot) + } + + procs, err := process.Pids() + if err == nil { + ret.Procs = uint64(len(procs)) + } + + values, err := common.DoSysctrlWithContext(ctx, "kern.uuid") + if err == nil && len(values) == 1 && values[0] != "" { + ret.HostID = strings.ToLower(values[0]) + } + + return ret, nil +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + values, err := common.DoSysctrlWithContext(ctx, "kern.boottime") + if err != nil { + return 0, err + } + // ex: { sec = 1392261637, usec = 627534 } Thu Feb 13 12:20:37 2014 + v := strings.Replace(values[2], ",", "", 1) + boottime, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + t = uint64(boottime) + atomic.StoreUint64(&cachedBootTime, t) + + return t, nil +} + +func uptime(boot uint64) uint64 { + return uint64(time.Now().Unix()) - boot +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + boot, err := BootTime() + if err != nil { + return 0, err + } + return uptime(boot), nil +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + utmpfile := "/var/run/utmpx" + var ret []UserStat + + file, err := os.Open(utmpfile) + if err != nil { + return ret, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return ret, err + } + + u := Utmpx{} + entrySize := int(unsafe.Sizeof(u)) + count := len(buf) / entrySize + + for i := 0; i < count; i++ { + b := buf[i*entrySize : i*entrySize+entrySize] + + var u Utmpx + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil { + continue + } + if u.Type != USER_PROCESS { + continue + } + user := UserStat{ + User: common.IntToString(u.User[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(u.Tv.Sec), + } + ret = append(ret, user) + } + + return ret, nil + +} + +func PlatformInformation() (string, string, string, error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + platform := "" + family := "" + pver := "" + + sw_vers, err := exec.LookPath("sw_vers") + if err != nil { + return "", "", "", err + } + + p, err := unix.Sysctl("kern.ostype") + if err == nil { + platform 
= strings.ToLower(p) + } + + out, err := invoke.CommandWithContext(ctx, sw_vers, "-productVersion") + if err == nil { + pver = strings.ToLower(strings.TrimSpace(string(out))) + } + + return platform, family, pver, nil +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + version, err := unix.Sysctl("kern.osrelease") + return strings.ToLower(version), err +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_darwin_386.go b/vendor/github.com/shirou/gopsutil/host/host_darwin_386.go new file mode 100644 index 0000000..c3596f9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_darwin_386.go @@ -0,0 +1,19 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package host + +type Utmpx struct { + User [256]int8 + ID [4]int8 + Line [32]int8 + Pid int32 + Type int16 + Pad_cgo_0 [6]byte + Tv Timeval + Host [256]int8 + Pad [16]uint32 +} +type Timeval struct { + Sec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/host/host_darwin_amd64.go new file mode 100644 index 0000000..c3596f9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_darwin_amd64.go @@ -0,0 +1,19 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package host + +type Utmpx struct { + User [256]int8 + ID [4]int8 + Line [32]int8 + Pid int32 + Type int16 + Pad_cgo_0 [6]byte + Tv Timeval + Host [256]int8 + Pad [16]uint32 +} +type Timeval struct { + Sec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_fallback.go b/vendor/github.com/shirou/gopsutil/host/host_fallback.go new file mode 100644 index 0000000..e80d7ea --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_fallback.go @@ -0,0 +1,65 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows + +package host + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + return []UserStat{}, common.ErrNotImplementedError +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + 
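+	// No fallback heuristic exists for virtualization detection, so platforms
+	// covered by this file's build tag simply report ErrNotImplementedError.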
return "", "", common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func PlatformInformation() (string, string, string, error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + return "", "", "", common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_freebsd.go b/vendor/github.com/shirou/gopsutil/host/host_freebsd.go new file mode 100644 index 0000000..00a8519 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_freebsd.go @@ -0,0 +1,245 @@ +// +build freebsd + +package host + +import ( + "bytes" + "context" + "encoding/binary" + "io/ioutil" + "os" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/process" + "golang.org/x/sys/unix" +) + +const ( + UTNameSize = 16 /* see MAXLOGNAME in */ + UTLineSize = 8 + UTHostSize = 16 +) + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + PlatformFamily: "freebsd", + } + + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + + platform, family, version, err := PlatformInformation() + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = version + ret.KernelVersion = version + } + + system, role, err := Virtualization() + if err == nil { + ret.VirtualizationSystem = system + ret.VirtualizationRole = role + } + + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime = uptime(boot) + } + + procs, err := process.Pids() + if err == nil { + ret.Procs = uint64(len(procs)) + } + + hostid, err := unix.Sysctl("kern.hostuuid") + if err == nil && hostid != "" { + ret.HostID = strings.ToLower(hostid) + } + + return ret, nil +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + buf, err := unix.SysctlRaw("kern.boottime") + if err != nil { + return 0, err + } + + tv := *(*syscall.Timeval)(unsafe.Pointer((&buf[0]))) + atomic.StoreUint64(&cachedBootTime, uint64(tv.Sec)) + + return t, nil +} + +func uptime(boot uint64) uint64 { + return uint64(time.Now().Unix()) - boot +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + boot, err := BootTime() + if err != nil { + return 0, err + } + return uptime(boot), nil +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + utmpfile := "/var/run/utx.active" + if !common.PathExists(utmpfile) { + utmpfile = "/var/run/utmp" // before 9.0 + return getUsersFromUtmp(utmpfile) + } + + var ret []UserStat + file, err := os.Open(utmpfile) + if err != nil { + return ret, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return ret, err + } + + 
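+	// utx.active is a flat array of fixed-size utmpx records; decode each
+	// record with encoding/binary and keep only type 4 (USER_PROCESS) entries.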
entrySize := sizeOfUtmpx + count := len(buf) / entrySize + + for i := 0; i < count; i++ { + b := buf[i*sizeOfUtmpx : (i+1)*sizeOfUtmpx] + var u Utmpx + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil || u.Type != 4 { + continue + } + sec := (binary.LittleEndian.Uint32(u.Tv.Sec[:])) / 2 // TODO: + user := UserStat{ + User: common.IntToString(u.User[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(sec), + } + + ret = append(ret, user) + } + + return ret, nil + +} + +func PlatformInformation() (string, string, string, error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + platform, err := unix.Sysctl("kern.ostype") + if err != nil { + return "", "", "", err + } + + version, err := unix.Sysctl("kern.osrelease") + if err != nil { + return "", "", "", err + } + + return strings.ToLower(platform), "", strings.ToLower(version), nil +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +// before 9.0 +func getUsersFromUtmp(utmpfile string) ([]UserStat, error) { + var ret []UserStat + file, err := os.Open(utmpfile) + if err != nil { + return ret, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return ret, err + } + + u := Utmp{} + entrySize := int(unsafe.Sizeof(u)) + count := len(buf) / entrySize + + for i := 0; i < count; i++ { + b := buf[i*entrySize : i*entrySize+entrySize] + var u Utmp + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil || u.Time == 0 { + continue + } + user := UserStat{ + User: common.IntToString(u.Name[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(u.Time), + } + + ret = append(ret, user) + } + + return ret, nil +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + _, _, version, err := PlatformInformation() + return version, err +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_freebsd_386.go b/vendor/github.com/shirou/gopsutil/host/host_freebsd_386.go new file mode 100644 index 0000000..7f06d8f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_freebsd_386.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmpx = 197 // TODO why should 197 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [16]int8 + Host [16]int8 + Time int32 +} + +type Utmpx struct { + Type int16 + Tv Timeval + Id [8]int8 + Pid int32 + User [32]int8 + Line [16]int8 + Host [125]int8 + // X__ut_spare [64]int8 +} + +type Timeval struct { + Sec [4]byte + Usec [3]byte +} diff --git 
a/vendor/github.com/shirou/gopsutil/host/host_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/host/host_freebsd_amd64.go new file mode 100644 index 0000000..3f015f0 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_freebsd_amd64.go @@ -0,0 +1,44 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmpx = 197 // TODO: why should 197, not 0x118 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [16]int8 + Host [16]int8 + Time int32 +} + +type Utmpx struct { + Type int16 + Tv Timeval + Id [8]int8 + Pid int32 + User [32]int8 + Line [16]int8 + Host [125]int8 + // Host [128]int8 + // X__ut_spare [64]int8 +} + +type Timeval struct { + Sec [4]byte + Usec [3]byte +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/host/host_freebsd_arm.go new file mode 100644 index 0000000..ac74980 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_freebsd_arm.go @@ -0,0 +1,44 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmpx = 197 // TODO: why should 197, not 0x118 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [16]int8 + Host [16]int8 + Time int32 +} + +type Utmpx struct { + Type int16 + Tv Timeval + Id [8]int8 + Pid int32 + User [32]int8 + Line [16]int8 + Host [125]int8 + // Host [128]int8 + // X__ut_spare [64]int8 +} + +type Timeval struct { + Sec [4]byte + Usec [3]byte +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux.go b/vendor/github.com/shirou/gopsutil/host/host_linux.go new file mode 100644 index 0000000..2c1ac6b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux.go @@ -0,0 +1,682 @@ +// +build linux + +package host + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/shirou/gopsutil/internal/common" +) + +type LSB struct { + ID string + Release string + Codename string + Description string +} + +// from utmp.h +const USER_PROCESS = 7 + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + } + + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + + platform, family, version, err := PlatformInformation() + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = version + } + kernelVersion, err := KernelVersion() + if err == nil { + ret.KernelVersion = kernelVersion + } + + system, role, err := Virtualization() + if err == nil { + ret.VirtualizationSystem = system + ret.VirtualizationRole = role + } + + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime = uptime(boot) + } + + if numProcs, err := common.NumProcs(); err == nil { + ret.Procs = numProcs + } + + sysProductUUID := common.HostSys("class/dmi/id/product_uuid") + machineID := common.HostEtc("machine-id") + switch { + // In order to read this file, needs to be supported by 
kernel/arch and run as root + // so having fallback is important + case common.PathExists(sysProductUUID): + lines, err := common.ReadLines(sysProductUUID) + if err == nil && len(lines) > 0 && lines[0] != "" { + ret.HostID = strings.ToLower(lines[0]) + break + } + fallthrough + // Fallback on GNU Linux systems with systemd, readable by everyone + case common.PathExists(machineID): + lines, err := common.ReadLines(machineID) + if err == nil && len(lines) > 0 && len(lines[0]) == 32 { + st := lines[0] + ret.HostID = fmt.Sprintf("%s-%s-%s-%s-%s", st[0:8], st[8:12], st[12:16], st[16:20], st[20:32]) + break + } + fallthrough + // Not stable between reboot, but better than nothing + default: + values, err := common.DoSysctrl("kernel.random.boot_id") + if err == nil && len(values) == 1 && values[0] != "" { + ret.HostID = strings.ToLower(values[0]) + } + } + + return ret, nil +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +// BootTime returns the system boot time expressed in seconds since the epoch. +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + + system, role, err := Virtualization() + if err != nil { + return 0, err + } + + statFile := "stat" + if system == "lxc" && role == "guest" { + // if lxc, /proc/uptime is used. + statFile = "uptime" + } else if system == "docker" && role == "guest" { + // also docker, guest + statFile = "uptime" + } + + filename := common.HostProc(statFile) + lines, err := common.ReadLines(filename) + if err != nil { + return 0, err + } + + if statFile == "stat" { + for _, line := range lines { + if strings.HasPrefix(line, "btime") { + f := strings.Fields(line) + if len(f) != 2 { + return 0, fmt.Errorf("wrong btime format") + } + b, err := strconv.ParseInt(f[1], 10, 64) + if err != nil { + return 0, err + } + t = uint64(b) + atomic.StoreUint64(&cachedBootTime, t) + return t, nil + } + } + } else if statFile == "uptime" { + if len(lines) != 1 { + return 0, fmt.Errorf("wrong uptime format") + } + f := strings.Fields(lines[0]) + b, err := strconv.ParseFloat(f[0], 64) + if err != nil { + return 0, err + } + t = uint64(time.Now().Unix()) - uint64(b) + atomic.StoreUint64(&cachedBootTime, t) + return t, nil + } + + return 0, fmt.Errorf("could not find btime") +} + +func uptime(boot uint64) uint64 { + return uint64(time.Now().Unix()) - boot +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + boot, err := BootTime() + if err != nil { + return 0, err + } + return uptime(boot), nil +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + utmpfile := common.HostVar("run/utmp") + + file, err := os.Open(utmpfile) + if err != nil { + return nil, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + count := len(buf) / sizeOfUtmp + + ret := make([]UserStat, 0, count) + + for i := 0; i < count; i++ { + b := buf[i*sizeOfUtmp : (i+1)*sizeOfUtmp] + + var u utmp + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil { + continue + } + if u.Type != USER_PROCESS { + continue + } + user := UserStat{ + User: common.IntToString(u.User[:]), + Terminal: 
common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(u.Tv.Sec), + } + ret = append(ret, user) + } + + return ret, nil + +} + +func getOSRelease() (platform string, version string, err error) { + contents, err := common.ReadLines(common.HostEtc("os-release")) + if err != nil { + return "", "", nil // return empty + } + for _, line := range contents { + field := strings.Split(line, "=") + if len(field) < 2 { + continue + } + switch field[0] { + case "ID": // use ID for lowercase + platform = field[1] + case "VERSION": + version = field[1] + } + } + return platform, version, nil +} + +func getLSB() (*LSB, error) { + ret := &LSB{} + if common.PathExists(common.HostEtc("lsb-release")) { + contents, err := common.ReadLines(common.HostEtc("lsb-release")) + if err != nil { + return ret, err // return empty + } + for _, line := range contents { + field := strings.Split(line, "=") + if len(field) < 2 { + continue + } + switch field[0] { + case "DISTRIB_ID": + ret.ID = field[1] + case "DISTRIB_RELEASE": + ret.Release = field[1] + case "DISTRIB_CODENAME": + ret.Codename = field[1] + case "DISTRIB_DESCRIPTION": + ret.Description = field[1] + } + } + } else if common.PathExists("/usr/bin/lsb_release") { + lsb_release, err := exec.LookPath("/usr/bin/lsb_release") + if err != nil { + return ret, err + } + out, err := invoke.Command(lsb_release) + if err != nil { + return ret, err + } + for _, line := range strings.Split(string(out), "\n") { + field := strings.Split(line, ":") + if len(field) < 2 { + continue + } + switch field[0] { + case "Distributor ID": + ret.ID = field[1] + case "Release": + ret.Release = field[1] + case "Codename": + ret.Codename = field[1] + case "Description": + ret.Description = field[1] + } + } + + } + + return ret, nil +} + +func PlatformInformation() (platform string, family string, version string, err error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { + + lsb, err := getLSB() + if err != nil { + lsb = &LSB{} + } + + if common.PathExists(common.HostEtc("oracle-release")) { + platform = "oracle" + contents, err := common.ReadLines(common.HostEtc("oracle-release")) + if err == nil { + version = getRedhatishVersion(contents) + } + + } else if common.PathExists(common.HostEtc("enterprise-release")) { + platform = "oracle" + contents, err := common.ReadLines(common.HostEtc("enterprise-release")) + if err == nil { + version = getRedhatishVersion(contents) + } + } else if common.PathExists(common.HostEtc("slackware-version")) { + platform = "slackware" + contents, err := common.ReadLines(common.HostEtc("slackware-version")) + if err == nil { + version = getSlackwareVersion(contents) + } + } else if common.PathExists(common.HostEtc("debian_version")) { + if lsb.ID == "Ubuntu" { + platform = "ubuntu" + version = lsb.Release + } else if lsb.ID == "LinuxMint" { + platform = "linuxmint" + version = lsb.Release + } else { + if common.PathExists("/usr/bin/raspi-config") { + platform = "raspbian" + } else { + platform = "debian" + } + contents, err := common.ReadLines(common.HostEtc("debian_version")) + if err == nil { + version = contents[0] + } + } + } else if common.PathExists(common.HostEtc("redhat-release")) { + contents, err := common.ReadLines(common.HostEtc("redhat-release")) + if err == nil { + version = getRedhatishVersion(contents) + platform = getRedhatishPlatform(contents) + } + } else if 
common.PathExists(common.HostEtc("system-release")) { + contents, err := common.ReadLines(common.HostEtc("system-release")) + if err == nil { + version = getRedhatishVersion(contents) + platform = getRedhatishPlatform(contents) + } + } else if common.PathExists(common.HostEtc("gentoo-release")) { + platform = "gentoo" + contents, err := common.ReadLines(common.HostEtc("gentoo-release")) + if err == nil { + version = getRedhatishVersion(contents) + } + } else if common.PathExists(common.HostEtc("SuSE-release")) { + contents, err := common.ReadLines(common.HostEtc("SuSE-release")) + if err == nil { + version = getSuseVersion(contents) + platform = getSusePlatform(contents) + } + // TODO: slackware detecion + } else if common.PathExists(common.HostEtc("arch-release")) { + platform = "arch" + version = lsb.Release + } else if common.PathExists(common.HostEtc("alpine-release")) { + platform = "alpine" + contents, err := common.ReadLines(common.HostEtc("alpine-release")) + if err == nil && len(contents) > 0 { + version = contents[0] + } + } else if common.PathExists(common.HostEtc("os-release")) { + p, v, err := getOSRelease() + if err == nil { + platform = p + version = v + } + } else if lsb.ID == "RedHat" { + platform = "redhat" + version = lsb.Release + } else if lsb.ID == "Amazon" { + platform = "amazon" + version = lsb.Release + } else if lsb.ID == "ScientificSL" { + platform = "scientific" + version = lsb.Release + } else if lsb.ID == "XenServer" { + platform = "xenserver" + version = lsb.Release + } else if lsb.ID != "" { + platform = strings.ToLower(lsb.ID) + version = lsb.Release + } + + switch platform { + case "debian", "ubuntu", "linuxmint", "raspbian": + family = "debian" + case "fedora": + family = "fedora" + case "oracle", "centos", "redhat", "scientific", "enterpriseenterprise", "amazon", "xenserver", "cloudlinux", "ibm_powerkvm": + family = "rhel" + case "suse", "opensuse": + family = "suse" + case "gentoo": + family = "gentoo" + case "slackware": + family = "slackware" + case "arch": + family = "arch" + case "exherbo": + family = "exherbo" + case "alpine": + family = "alpine" + case "coreos": + family = "coreos" + } + + return platform, family, version, nil + +} + +func KernelVersion() (version string, err error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (version string, err error) { + filename := common.HostProc("sys/kernel/osrelease") + if common.PathExists(filename) { + contents, err := common.ReadLines(filename) + if err != nil { + return "", err + } + + if len(contents) > 0 { + version = contents[0] + } + } + + return version, nil +} + +func getSlackwareVersion(contents []string) string { + c := strings.ToLower(strings.Join(contents, "")) + c = strings.Replace(c, "slackware ", "", 1) + return c +} + +func getRedhatishVersion(contents []string) string { + c := strings.ToLower(strings.Join(contents, "")) + + if strings.Contains(c, "rawhide") { + return "rawhide" + } + if matches := regexp.MustCompile(`release (\d[\d.]*)`).FindStringSubmatch(c); matches != nil { + return matches[1] + } + return "" +} + +func getRedhatishPlatform(contents []string) string { + c := strings.ToLower(strings.Join(contents, "")) + + if strings.Contains(c, "red hat") { + return "redhat" + } + f := strings.Split(c, " ") + + return f[0] +} + +func getSuseVersion(contents []string) string { + version := "" + for _, line := range contents { + if matches := regexp.MustCompile(`VERSION = ([\d.]+)`).FindStringSubmatch(line); matches 
!= nil { + version = matches[1] + } else if matches := regexp.MustCompile(`PATCHLEVEL = ([\d]+)`).FindStringSubmatch(line); matches != nil { + version = version + "." + matches[1] + } + } + return version +} + +func getSusePlatform(contents []string) string { + c := strings.ToLower(strings.Join(contents, "")) + if strings.Contains(c, "opensuse") { + return "opensuse" + } + return "suse" +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + var system string + var role string + + filename := common.HostProc("xen") + if common.PathExists(filename) { + system = "xen" + role = "guest" // assume guest + + if common.PathExists(filepath.Join(filename, "capabilities")) { + contents, err := common.ReadLines(filepath.Join(filename, "capabilities")) + if err == nil { + if common.StringsContains(contents, "control_d") { + role = "host" + } + } + } + } + + filename = common.HostProc("modules") + if common.PathExists(filename) { + contents, err := common.ReadLines(filename) + if err == nil { + if common.StringsContains(contents, "kvm") { + system = "kvm" + role = "host" + } else if common.StringsContains(contents, "vboxdrv") { + system = "vbox" + role = "host" + } else if common.StringsContains(contents, "vboxguest") { + system = "vbox" + role = "guest" + } else if common.StringsContains(contents, "vmware") { + system = "vmware" + role = "guest" + } + } + } + + filename = common.HostProc("cpuinfo") + if common.PathExists(filename) { + contents, err := common.ReadLines(filename) + if err == nil { + if common.StringsContains(contents, "QEMU Virtual CPU") || + common.StringsContains(contents, "Common KVM processor") || + common.StringsContains(contents, "Common 32-bit KVM processor") { + system = "kvm" + role = "guest" + } + } + } + + filename = common.HostProc() + if common.PathExists(filepath.Join(filename, "bc", "0")) { + system = "openvz" + role = "host" + } else if common.PathExists(filepath.Join(filename, "vz")) { + system = "openvz" + role = "guest" + } + + // not use dmidecode because it requires root + if common.PathExists(filepath.Join(filename, "self", "status")) { + contents, err := common.ReadLines(filepath.Join(filename, "self", "status")) + if err == nil { + + if common.StringsContains(contents, "s_context:") || + common.StringsContains(contents, "VxID:") { + system = "linux-vserver" + } + // TODO: guest or host + } + } + + if common.PathExists(filepath.Join(filename, "self", "cgroup")) { + contents, err := common.ReadLines(filepath.Join(filename, "self", "cgroup")) + if err == nil { + if common.StringsContains(contents, "lxc") { + system = "lxc" + role = "guest" + } else if common.StringsContains(contents, "docker") { + system = "docker" + role = "guest" + } else if common.StringsContains(contents, "machine-rkt") { + system = "rkt" + role = "guest" + } else if common.PathExists("/usr/bin/lxc-version") { + system = "lxc" + role = "host" + } + } + } + + if common.PathExists(common.HostEtc("os-release")) { + p, _, err := getOSRelease() + if err == nil && p == "coreos" { + system = "rkt" // Is it true? 
+ role = "host" + } + } + return system, role, nil +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + var temperatures []TemperatureStat + files, err := filepath.Glob(common.HostSys("/class/hwmon/hwmon*/temp*_*")) + if err != nil { + return temperatures, err + } + if len(files) == 0 { + // CentOS has an intermediate /device directory: + // https://github.com/giampaolo/psutil/issues/971 + files, err = filepath.Glob(common.HostSys("/class/hwmon/hwmon*/device/temp*_*")) + if err != nil { + return temperatures, err + } + } + + // example directory + // device/ temp1_crit_alarm temp2_crit_alarm temp3_crit_alarm temp4_crit_alarm temp5_crit_alarm temp6_crit_alarm temp7_crit_alarm + // name temp1_input temp2_input temp3_input temp4_input temp5_input temp6_input temp7_input + // power/ temp1_label temp2_label temp3_label temp4_label temp5_label temp6_label temp7_label + // subsystem/ temp1_max temp2_max temp3_max temp4_max temp5_max temp6_max temp7_max + // temp1_crit temp2_crit temp3_crit temp4_crit temp5_crit temp6_crit temp7_crit uevent + for _, file := range files { + filename := strings.Split(filepath.Base(file), "_") + if filename[1] == "label" { + // Do not try to read the temperature of the label file + continue + } + + // Get the label of the temperature you are reading + var label string + c, _ := ioutil.ReadFile(filepath.Join(filepath.Dir(file), filename[0]+"_label")) + if c != nil { + //format the label from "Core 0" to "core0_" + label = fmt.Sprintf("%s_", strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(c))), " "), "")) + } + + // Get the name of the tempearture you are reading + name, err := ioutil.ReadFile(filepath.Join(filepath.Dir(file), "name")) + if err != nil { + return temperatures, err + } + + // Get the temperature reading + current, err := ioutil.ReadFile(file) + if err != nil { + return temperatures, err + } + temperature, err := strconv.ParseFloat(strings.TrimSpace(string(current)), 64) + if err != nil { + continue + } + + tempName := strings.TrimSpace(strings.ToLower(string(strings.Join(filename[1:], "")))) + temperatures = append(temperatures, TemperatureStat{ + SensorKey: fmt.Sprintf("%s_%s%s", strings.TrimSpace(string(name)), label, tempName), + Temperature: temperature / 1000.0, + }) + } + return temperatures, nil +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_386.go b/vendor/github.com/shirou/gopsutil/host/host_linux_386.go new file mode 100644 index 0000000..79b5cb5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_386.go @@ -0,0 +1,45 @@ +// ATTENTION - FILE MANUAL FIXED AFTER CGO. 
+// Fixed line: Tv _Ctype_struct_timeval -> Tv UtTv +// Created by cgo -godefs, MANUAL FIXED +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + ID [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv UtTv + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type UtTv struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_amd64.go b/vendor/github.com/shirou/gopsutil/host/host_linux_amd64.go new file mode 100644 index 0000000..9a69652 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_amd64.go @@ -0,0 +1,48 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv _Ctype_struct___0 + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int64 + Usec int64 +} + +type _Ctype_struct___0 struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_arm.go b/vendor/github.com/shirou/gopsutil/host/host_linux_arm.go new file mode 100644 index 0000000..e2cf448 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_arm.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go | sed "s/uint8/int8/g" + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_arm64.go b/vendor/github.com/shirou/gopsutil/host/host_linux_arm64.go new file mode 100644 index 0000000..37dbe5c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_arm64.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int64 + Usec int64 +} diff --git 
a/vendor/github.com/shirou/gopsutil/host/host_linux_mips.go b/vendor/github.com/shirou/gopsutil/host/host_linux_mips.go new file mode 100644 index 0000000..b0fca09 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_mips.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_mips64.go b/vendor/github.com/shirou/gopsutil/host/host_linux_mips64.go new file mode 100644 index 0000000..b0fca09 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_mips64.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_mips64le.go b/vendor/github.com/shirou/gopsutil/host/host_linux_mips64le.go new file mode 100644 index 0000000..b0fca09 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_mips64le.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_mipsle.go b/vendor/github.com/shirou/gopsutil/host/host_linux_mipsle.go new file mode 100644 index 0000000..b0fca09 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_mipsle.go @@ -0,0 +1,43 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__unused [20]int8 +} +type exit_status struct { 
+ Termination int16 + Exit int16 +} +type timeval struct { + Sec int32 + Usec int32 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_ppc64le.go b/vendor/github.com/shirou/gopsutil/host/host_linux_ppc64le.go new file mode 100644 index 0000000..d081a08 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_ppc64le.go @@ -0,0 +1,45 @@ +// +build linux +// +build ppc64le +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int64 + Usec int64 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_linux_s390x.go b/vendor/github.com/shirou/gopsutil/host/host_linux_s390x.go new file mode 100644 index 0000000..083fbf9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_linux_s390x.go @@ -0,0 +1,45 @@ +// +build linux +// +build s390x +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_linux.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x180 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type utmp struct { + Type int16 + Pad_cgo_0 [2]byte + Pid int32 + Line [32]int8 + Id [4]int8 + User [32]int8 + Host [256]int8 + Exit exit_status + Session int32 + Tv timeval + Addr_v6 [4]int32 + X__glibc_reserved [20]int8 +} +type exit_status struct { + Termination int16 + Exit int16 +} +type timeval struct { + Sec int64 + Usec int64 +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_openbsd.go b/vendor/github.com/shirou/gopsutil/host/host_openbsd.go new file mode 100644 index 0000000..bb16fca --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_openbsd.go @@ -0,0 +1,190 @@ +// +build openbsd + +package host + +import ( + "bytes" + "context" + "encoding/binary" + "io/ioutil" + "os" + "runtime" + "strconv" + "strings" + "time" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/process" + "golang.org/x/sys/unix" +) + +const ( + UTNameSize = 32 /* see MAXLOGNAME in */ + UTLineSize = 8 + UTHostSize = 16 +) + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + PlatformFamily: "openbsd", + } + + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + + platform, family, version, err := PlatformInformation() + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = version + } + system, role, err := Virtualization() + if err == nil { + ret.VirtualizationSystem = system + ret.VirtualizationRole = role + } + + procs, err := process.Pids() + if err == nil { + ret.Procs = uint64(len(procs)) + } + + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime = uptime(boot) + } + + return ret, nil +} + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func 
BootTimeWithContext(ctx context.Context) (uint64, error) { + val, err := common.DoSysctrl("kern.boottime") + if err != nil { + return 0, err + } + + boottime, err := strconv.ParseUint(val[0], 10, 64) + if err != nil { + return 0, err + } + + return boottime, nil +} + +func uptime(boot uint64) uint64 { + return uint64(time.Now().Unix()) - boot +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + boot, err := BootTime() + if err != nil { + return 0, err + } + return uptime(boot), nil +} + +func PlatformInformation() (string, string, string, error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + platform := "" + family := "" + version := "" + + p, err := unix.Sysctl("kern.ostype") + if err == nil { + platform = strings.ToLower(p) + } + v, err := unix.Sysctl("kern.osrelease") + if err == nil { + version = strings.ToLower(v) + } + + return platform, family, version, nil +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + var ret []UserStat + utmpfile := "/var/run/utmp" + file, err := os.Open(utmpfile) + if err != nil { + return ret, err + } + defer file.Close() + + buf, err := ioutil.ReadAll(file) + if err != nil { + return ret, err + } + + u := Utmp{} + entrySize := int(unsafe.Sizeof(u)) + count := len(buf) / entrySize + + for i := 0; i < count; i++ { + b := buf[i*entrySize : i*entrySize+entrySize] + var u Utmp + br := bytes.NewReader(b) + err := binary.Read(br, binary.LittleEndian, &u) + if err != nil || u.Time == 0 { + continue + } + user := UserStat{ + User: common.IntToString(u.Name[:]), + Terminal: common.IntToString(u.Line[:]), + Host: common.IntToString(u.Host[:]), + Started: int(u.Time), + } + + ret = append(ret, user) + } + + return ret, nil +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + _, _, version, err := PlatformInformation() + return version, err +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/host/host_openbsd_amd64.go new file mode 100644 index 0000000..afe0943 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_openbsd_amd64.go @@ -0,0 +1,31 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package host + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + sizeOfUtmp = 0x130 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Utmp struct { + Line [8]int8 + Name [32]int8 + Host [256]int8 + Time int64 +} +type Timeval struct { + Sec int64 + Usec int64 +} diff --git 
a/vendor/github.com/shirou/gopsutil/host/host_solaris.go b/vendor/github.com/shirou/gopsutil/host/host_solaris.go new file mode 100644 index 0000000..bb83bfc --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_solaris.go @@ -0,0 +1,248 @@ +package host + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/shirou/gopsutil/internal/common" +) + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + result := &InfoStat{ + OS: runtime.GOOS, + } + + hostname, err := os.Hostname() + if err != nil { + return nil, err + } + result.Hostname = hostname + + // Parse versions from output of `uname(1)` + uname, err := exec.LookPath("/usr/bin/uname") + if err != nil { + return nil, err + } + + out, err := invoke.CommandWithContext(ctx, uname, "-srv") + if err != nil { + return nil, err + } + + fields := strings.Fields(string(out)) + if len(fields) >= 1 { + result.PlatformFamily = fields[0] + } + if len(fields) >= 2 { + result.KernelVersion = fields[1] + } + if len(fields) == 3 { + result.PlatformVersion = fields[2] + } + + // Find distribution name from /etc/release + fh, err := os.Open("/etc/release") + if err != nil { + return nil, err + } + defer fh.Close() + + sc := bufio.NewScanner(fh) + if sc.Scan() { + line := strings.TrimSpace(sc.Text()) + switch { + case strings.HasPrefix(line, "SmartOS"): + result.Platform = "SmartOS" + case strings.HasPrefix(line, "OpenIndiana"): + result.Platform = "OpenIndiana" + case strings.HasPrefix(line, "OmniOS"): + result.Platform = "OmniOS" + case strings.HasPrefix(line, "Open Storage"): + result.Platform = "NexentaStor" + case strings.HasPrefix(line, "Solaris"): + result.Platform = "Solaris" + case strings.HasPrefix(line, "Oracle Solaris"): + result.Platform = "Solaris" + default: + result.Platform = strings.Fields(line)[0] + } + } + + switch result.Platform { + case "SmartOS": + // If everything works, use the current zone ID as the HostID if present. + zonename, err := exec.LookPath("/usr/bin/zonename") + if err == nil { + out, err := invoke.CommandWithContext(ctx, zonename) + if err == nil { + sc := bufio.NewScanner(bytes.NewReader(out)) + for sc.Scan() { + line := sc.Text() + + // If we're in the global zone, rely on the hostname. + if line == "global" { + hostname, err := os.Hostname() + if err == nil { + result.HostID = hostname + } + } else { + result.HostID = strings.TrimSpace(line) + break + } + } + } + } + } + + // If HostID is still empty, use hostid(1), which can lie to callers but at + // this point there are no hardware facilities available. This behavior + // matches that of other supported OSes. 
+ if result.HostID == "" { + hostID, err := exec.LookPath("/usr/bin/hostid") + if err == nil { + out, err := invoke.CommandWithContext(ctx, hostID) + if err == nil { + sc := bufio.NewScanner(bytes.NewReader(out)) + for sc.Scan() { + line := sc.Text() + result.HostID = strings.TrimSpace(line) + break + } + } + } + } + + // Find the boot time and calculate uptime relative to it + bootTime, err := BootTime() + if err != nil { + return nil, err + } + result.BootTime = bootTime + result.Uptime = uptimeSince(bootTime) + + // Count number of processes based on the number of entries in /proc + dirs, err := ioutil.ReadDir("/proc") + if err != nil { + return nil, err + } + result.Procs = uint64(len(dirs)) + + return result, nil +} + +var kstatMatch = regexp.MustCompile(`([^\s]+)[\s]+([^\s]*)`) + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + kstat, err := exec.LookPath("/usr/bin/kstat") + if err != nil { + return 0, err + } + + out, err := invoke.CommandWithContext(ctx, kstat, "-p", "unix:0:system_misc:boot_time") + if err != nil { + return 0, err + } + + kstats := kstatMatch.FindAllStringSubmatch(string(out), -1) + if len(kstats) != 1 { + return 0, fmt.Errorf("expected 1 kstat, found %d", len(kstats)) + } + + return strconv.ParseUint(kstats[0][2], 10, 64) +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + bootTime, err := BootTime() + if err != nil { + return 0, err + } + return uptimeSince(bootTime), nil +} + +func uptimeSince(since uint64) uint64 { + return uint64(time.Now().Unix()) - since +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + return []UserStat{}, common.ErrNotImplementedError +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + // Parse versions from output of `uname(1)` + uname, err := exec.LookPath("/usr/bin/uname") + if err != nil { + return "", err + } + + out, err := invoke.CommandWithContext(ctx, uname, "-srv") + if err != nil { + return "", err + } + + fields := strings.Fields(string(out)) + if len(fields) >= 2 { + return fields[1], nil + } + return "", fmt.Errorf("could not get kernel version") +} + +func PlatformInformation() (platform string, family string, version string, err error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { + /* This is not finished yet at all. Please contribute! 
*/ + + version, err = KernelVersion() + if err != nil { + return "", "", "", err + } + + return "solaris", "solaris", version, nil +} diff --git a/vendor/github.com/shirou/gopsutil/host/host_windows.go b/vendor/github.com/shirou/gopsutil/host/host_windows.go new file mode 100644 index 0000000..d89e191 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/host_windows.go @@ -0,0 +1,295 @@ +// +build windows + +package host + +import ( + "context" + "fmt" + "math" + "os" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + "unsafe" + + "github.com/StackExchange/wmi" + "github.com/shirou/gopsutil/internal/common" + process "github.com/shirou/gopsutil/process" + "golang.org/x/sys/windows" +) + +var ( + procGetSystemTimeAsFileTime = common.Modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetTickCount32 = common.Modkernel32.NewProc("GetTickCount") + procGetTickCount64 = common.Modkernel32.NewProc("GetTickCount64") + procRtlGetVersion = common.ModNt.NewProc("RtlGetVersion") +) + +// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/ns-wdm-_osversioninfoexw +type osVersionInfoExW struct { + dwOSVersionInfoSize uint32 + dwMajorVersion uint32 + dwMinorVersion uint32 + dwBuildNumber uint32 + dwPlatformId uint32 + szCSDVersion [128]uint16 + wServicePackMajor uint16 + wServicePackMinor uint16 + wSuiteMask uint16 + wProductType uint8 + wReserved uint8 +} + +type msAcpi_ThermalZoneTemperature struct { + Active bool + CriticalTripPoint uint32 + CurrentTemperature uint32 + InstanceName string +} + +func Info() (*InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) (*InfoStat, error) { + ret := &InfoStat{ + OS: runtime.GOOS, + } + + { + hostname, err := os.Hostname() + if err == nil { + ret.Hostname = hostname + } + } + + { + platform, family, version, err := PlatformInformationWithContext(ctx) + if err == nil { + ret.Platform = platform + ret.PlatformFamily = family + ret.PlatformVersion = version + } else { + return ret, err + } + } + + { + boot, err := BootTime() + if err == nil { + ret.BootTime = boot + ret.Uptime, _ = Uptime() + } + } + + { + hostID, err := getMachineGuid() + if err == nil { + ret.HostID = strings.ToLower(hostID) + } + } + + { + procs, err := process.Pids() + if err == nil { + ret.Procs = uint64(len(procs)) + } + } + + return ret, nil +} + +func getMachineGuid() (string, error) { + // there has been reports of issues on 32bit using golang.org/x/sys/windows/registry, see https://github.com/shirou/gopsutil/pull/312#issuecomment-277422612 + // for rationale of using windows.RegOpenKeyEx/RegQueryValueEx instead of registry.OpenKey/GetStringValue + var h windows.Handle + err := windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &h) + if err != nil { + return "", err + } + defer windows.RegCloseKey(h) + + const windowsRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 + const uuidLen = 36 + + var regBuf [windowsRegBufLen]uint16 + bufLen := uint32(windowsRegBufLen) + var valType uint32 + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", err + } + + hostID := windows.UTF16ToString(regBuf[:]) + hostIDLen := len(hostID) + if hostIDLen != uuidLen { + return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + } + + return 
hostID, nil +} + +func Uptime() (uint64, error) { + return UptimeWithContext(context.Background()) +} + +func UptimeWithContext(ctx context.Context) (uint64, error) { + procGetTickCount := procGetTickCount64 + err := procGetTickCount64.Find() + if err != nil { + procGetTickCount = procGetTickCount32 // handle WinXP, but keep in mind that "the time will wrap around to zero if the system is run continuously for 49.7 days." from MSDN + } + r1, _, lastErr := syscall.Syscall(procGetTickCount.Addr(), 0, 0, 0, 0) + if lastErr != 0 { + return 0, lastErr + } + return uint64((time.Duration(r1) * time.Millisecond).Seconds()), nil +} + +func bootTimeFromUptime(up uint64) uint64 { + return uint64(time.Now().Unix()) - up +} + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func BootTime() (uint64, error) { + return BootTimeWithContext(context.Background()) +} + +func BootTimeWithContext(ctx context.Context) (uint64, error) { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + up, err := Uptime() + if err != nil { + return 0, err + } + t = bootTimeFromUptime(up) + atomic.StoreUint64(&cachedBootTime, t) + return t, nil +} + +func PlatformInformation() (platform string, family string, version string, err error) { + return PlatformInformationWithContext(context.Background()) +} + +func PlatformInformationWithContext(ctx context.Context) (platform string, family string, version string, err error) { + // GetVersionEx lies on Windows 8.1 and returns as Windows 8 if we don't declare compatibility in manifest + // RtlGetVersion bypasses this lying layer and returns the true Windows version + // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/nf-wdm-rtlgetversion + // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/wdm/ns-wdm-_osversioninfoexw + var osInfo osVersionInfoExW + osInfo.dwOSVersionInfoSize = uint32(unsafe.Sizeof(osInfo)) + ret, _, err := procRtlGetVersion.Call(uintptr(unsafe.Pointer(&osInfo))) + if ret != 0 { + return + } + + // Platform + var h windows.Handle // like getMachineGuid(), we query the registry using the raw windows.RegOpenKeyEx/RegQueryValueEx + err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, windows.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`), 0, windows.KEY_READ|windows.KEY_WOW64_64KEY, &h) + if err != nil { + return + } + defer windows.RegCloseKey(h) + var bufLen uint32 + var valType uint32 + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`ProductName`), nil, &valType, nil, &bufLen) + if err != nil { + return + } + regBuf := make([]uint16, bufLen/2+1) + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`ProductName`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return + } + platform = windows.UTF16ToString(regBuf[:]) + if !strings.HasPrefix(platform, "Microsoft") { + platform = "Microsoft " + platform + } + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CSDVersion`), nil, &valType, nil, &bufLen) // append Service Pack number, only on success + if err == nil { // don't return an error if only the Service Pack retrieval fails + regBuf = make([]uint16, bufLen/2+1) + err = windows.RegQueryValueEx(h, windows.StringToUTF16Ptr(`CSDVersion`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err == nil { + platform += " " + windows.UTF16ToString(regBuf[:]) + } + } + + // PlatformFamily + switch osInfo.wProductType { + case 1: + family = "Standalone Workstation" + case 2: 
+ family = "Server (Domain Controller)" + case 3: + family = "Server" + } + + // Platform Version + version = fmt.Sprintf("%d.%d.%d Build %d", osInfo.dwMajorVersion, osInfo.dwMinorVersion, osInfo.dwBuildNumber, osInfo.dwBuildNumber) + + return platform, family, version, nil +} + +func Users() ([]UserStat, error) { + return UsersWithContext(context.Background()) +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + var ret []UserStat + + return ret, nil +} + +func SensorsTemperatures() ([]TemperatureStat, error) { + return SensorsTemperaturesWithContext(context.Background()) +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + var ret []TemperatureStat + var dst []msAcpi_ThermalZoneTemperature + q := wmi.CreateQuery(&dst, "") + if err := common.WMIQueryWithContext(ctx, q, &dst, nil, "root/wmi"); err != nil { + return ret, err + } + + for _, v := range dst { + ts := TemperatureStat{ + SensorKey: v.InstanceName, + Temperature: kelvinToCelsius(v.CurrentTemperature, 2), + } + ret = append(ret, ts) + } + + return ret, nil +} + +func kelvinToCelsius(temp uint32, n int) float64 { + // wmi return temperature Kelvin * 10, so need to divide the result by 10, + // and then minus 273.15 to get °Celsius. + t := float64(temp/10) - 273.15 + n10 := math.Pow10(n) + return math.Trunc((t+0.5/n10)*n10) / n10 +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func KernelVersion() (string, error) { + return KernelVersionWithContext(context.Background()) +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + _, _, version, err := PlatformInformation() + return version, err +} diff --git a/vendor/github.com/shirou/gopsutil/host/types_darwin.go b/vendor/github.com/shirou/gopsutil/host/types_darwin.go new file mode 100644 index 0000000..b858227 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/types_darwin.go @@ -0,0 +1,17 @@ +// +build ignore +// plus hand editing about timeval + +/* +Input to cgo -godefs. +*/ + +package host + +/* +#include +#include +*/ +import "C" + +type Utmpx C.struct_utmpx +type Timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/host/types_freebsd.go b/vendor/github.com/shirou/gopsutil/host/types_freebsd.go new file mode 100644 index 0000000..e70677f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/types_freebsd.go @@ -0,0 +1,44 @@ +// +build ignore + +/* +Input to cgo -godefs. +*/ + +package host + +/* +#define KERNEL +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + sizeOfUtmpx = C.sizeof_struct_utmpx +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +type Utmp C.struct_utmp +type Utmpx C.struct_utmpx +type Timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/host/types_linux.go b/vendor/github.com/shirou/gopsutil/host/types_linux.go new file mode 100644 index 0000000..8adecb6 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/types_linux.go @@ -0,0 +1,42 @@ +// +build ignore + +/* +Input to cgo -godefs. 
+*/ + +package host + +/* +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + sizeOfUtmp = C.sizeof_struct_utmp +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +type utmp C.struct_utmp +type exit_status C.struct_exit_status +type timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/host/types_openbsd.go b/vendor/github.com/shirou/gopsutil/host/types_openbsd.go new file mode 100644 index 0000000..9ebb97c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/host/types_openbsd.go @@ -0,0 +1,43 @@ +// +build ignore + +/* +Input to cgo -godefs. +*/ + +package host + +/* +#define KERNEL +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + sizeOfUtmp = C.sizeof_struct_utmp +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +type Utmp C.struct_utmp +type Timeval C.struct_timeval diff --git a/vendor/github.com/shirou/gopsutil/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/internal/common/binary.go new file mode 100644 index 0000000..9b5dc55 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/binary.go @@ -0,0 +1,634 @@ +package common + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package binary implements simple translation between numbers and byte +// sequences and encoding and decoding of varints. +// +// Numbers are translated by reading and writing fixed-size values. +// A fixed-size value is either a fixed-size arithmetic +// type (int8, uint8, int16, float32, complex64, ...) +// or an array or struct containing only fixed-size values. +// +// The varint functions encode and decode single integer values using +// a variable-length encoding; smaller values require fewer bytes. +// For a specification, see +// http://code.google.com/apis/protocolbuffers/docs/encoding.html. +// +// This package favors simplicity over efficiency. Clients that require +// high-performance serialization, especially for large data structures, +// should look at more advanced solutions such as the encoding/gob +// package or protocol buffers. +import ( + "errors" + "io" + "math" + "reflect" +) + +// A ByteOrder specifies how to convert byte sequences into +// 16-, 32-, or 64-bit unsigned integers. +type ByteOrder interface { + Uint16([]byte) uint16 + Uint32([]byte) uint32 + Uint64([]byte) uint64 + PutUint16([]byte, uint16) + PutUint32([]byte, uint32) + PutUint64([]byte, uint64) + String() string +} + +// LittleEndian is the little-endian implementation of ByteOrder. +var LittleEndian littleEndian + +// BigEndian is the big-endian implementation of ByteOrder. 
+var BigEndian bigEndian + +type littleEndian struct{} + +func (littleEndian) Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 } + +func (littleEndian) PutUint16(b []byte, v uint16) { + b[0] = byte(v) + b[1] = byte(v >> 8) +} + +func (littleEndian) Uint32(b []byte) uint32 { + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) PutUint32(b []byte, v uint32) { + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (littleEndian) Uint64(b []byte) uint64 { + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (littleEndian) PutUint64(b []byte, v uint64) { + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) +} + +func (littleEndian) String() string { return "LittleEndian" } + +func (littleEndian) GoString() string { return "binary.LittleEndian" } + +type bigEndian struct{} + +func (bigEndian) Uint16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 } + +func (bigEndian) PutUint16(b []byte, v uint16) { + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (bigEndian) Uint32(b []byte) uint32 { + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) PutUint32(b []byte, v uint32) { + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (bigEndian) Uint64(b []byte) uint64 { + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func (bigEndian) PutUint64(b []byte, v uint64) { + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} + +func (bigEndian) String() string { return "BigEndian" } + +func (bigEndian) GoString() string { return "binary.BigEndian" } + +// Read reads structured binary data from r into data. +// Data must be a pointer to a fixed-size value or a slice +// of fixed-size values. +// Bytes read from r are decoded using the specified byte order +// and written to successive fields of the data. +// When reading into structs, the field data for fields with +// blank (_) field names is skipped; i.e., blank field names +// may be used for padding. +// When reading into a struct, all non-blank fields must be exported. +func Read(r io.Reader, order ByteOrder, data interface{}) error { + // Fast path for basic types and slices. + if n := intDataSize(data); n != 0 { + var b [8]byte + var bs []byte + if n > len(b) { + bs = make([]byte, n) + } else { + bs = b[:n] + } + if _, err := io.ReadFull(r, bs); err != nil { + return err + } + switch data := data.(type) { + case *int8: + *data = int8(b[0]) + case *uint8: + *data = b[0] + case *int16: + *data = int16(order.Uint16(bs)) + case *uint16: + *data = order.Uint16(bs) + case *int32: + *data = int32(order.Uint32(bs)) + case *uint32: + *data = order.Uint32(bs) + case *int64: + *data = int64(order.Uint64(bs)) + case *uint64: + *data = order.Uint64(bs) + case []int8: + for i, x := range bs { // Easier to loop over the input for 8-bit values. 
+ data[i] = int8(x) + } + case []uint8: + copy(data, bs) + case []int16: + for i := range data { + data[i] = int16(order.Uint16(bs[2*i:])) + } + case []uint16: + for i := range data { + data[i] = order.Uint16(bs[2*i:]) + } + case []int32: + for i := range data { + data[i] = int32(order.Uint32(bs[4*i:])) + } + case []uint32: + for i := range data { + data[i] = order.Uint32(bs[4*i:]) + } + case []int64: + for i := range data { + data[i] = int64(order.Uint64(bs[8*i:])) + } + case []uint64: + for i := range data { + data[i] = order.Uint64(bs[8*i:]) + } + } + return nil + } + + // Fallback to reflect-based decoding. + v := reflect.ValueOf(data) + size := -1 + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + size = dataSize(v) + case reflect.Slice: + size = dataSize(v) + } + if size < 0 { + return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String()) + } + d := &decoder{order: order, buf: make([]byte, size)} + if _, err := io.ReadFull(r, d.buf); err != nil { + return err + } + d.value(v) + return nil +} + +// Write writes the binary representation of data into w. +// Data must be a fixed-size value or a slice of fixed-size +// values, or a pointer to such data. +// Bytes written to w are encoded using the specified byte order +// and read from successive fields of the data. +// When writing structs, zero values are written for fields +// with blank (_) field names. +func Write(w io.Writer, order ByteOrder, data interface{}) error { + // Fast path for basic types and slices. + if n := intDataSize(data); n != 0 { + var b [8]byte + var bs []byte + if n > len(b) { + bs = make([]byte, n) + } else { + bs = b[:n] + } + switch v := data.(type) { + case *int8: + bs = b[:1] + b[0] = byte(*v) + case int8: + bs = b[:1] + b[0] = byte(v) + case []int8: + for i, x := range v { + bs[i] = byte(x) + } + case *uint8: + bs = b[:1] + b[0] = *v + case uint8: + bs = b[:1] + b[0] = byte(v) + case []uint8: + bs = v + case *int16: + bs = b[:2] + order.PutUint16(bs, uint16(*v)) + case int16: + bs = b[:2] + order.PutUint16(bs, uint16(v)) + case []int16: + for i, x := range v { + order.PutUint16(bs[2*i:], uint16(x)) + } + case *uint16: + bs = b[:2] + order.PutUint16(bs, *v) + case uint16: + bs = b[:2] + order.PutUint16(bs, v) + case []uint16: + for i, x := range v { + order.PutUint16(bs[2*i:], x) + } + case *int32: + bs = b[:4] + order.PutUint32(bs, uint32(*v)) + case int32: + bs = b[:4] + order.PutUint32(bs, uint32(v)) + case []int32: + for i, x := range v { + order.PutUint32(bs[4*i:], uint32(x)) + } + case *uint32: + bs = b[:4] + order.PutUint32(bs, *v) + case uint32: + bs = b[:4] + order.PutUint32(bs, v) + case []uint32: + for i, x := range v { + order.PutUint32(bs[4*i:], x) + } + case *int64: + bs = b[:8] + order.PutUint64(bs, uint64(*v)) + case int64: + bs = b[:8] + order.PutUint64(bs, uint64(v)) + case []int64: + for i, x := range v { + order.PutUint64(bs[8*i:], uint64(x)) + } + case *uint64: + bs = b[:8] + order.PutUint64(bs, *v) + case uint64: + bs = b[:8] + order.PutUint64(bs, v) + case []uint64: + for i, x := range v { + order.PutUint64(bs[8*i:], x) + } + } + _, err := w.Write(bs) + return err + } + + // Fallback to reflect-based encoding. 
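+ // The reflect path below handles structs, arrays, and slices of fixed-size
+ // values (and pointers to them, via reflect.Indirect); dataSize reports -1
+ // for anything else, which surfaces as the "invalid type" error.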
+ v := reflect.Indirect(reflect.ValueOf(data)) + size := dataSize(v) + if size < 0 { + return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String()) + } + buf := make([]byte, size) + e := &encoder{order: order, buf: buf} + e.value(v) + _, err := w.Write(buf) + return err +} + +// Size returns how many bytes Write would generate to encode the value v, which +// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data. +// If v is neither of these, Size returns -1. +func Size(v interface{}) int { + return dataSize(reflect.Indirect(reflect.ValueOf(v))) +} + +// dataSize returns the number of bytes the actual data represented by v occupies in memory. +// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice +// it returns the length of the slice times the element size and does not count the memory +// occupied by the header. If the type of v is not acceptable, dataSize returns -1. +func dataSize(v reflect.Value) int { + if v.Kind() == reflect.Slice { + if s := sizeof(v.Type().Elem()); s >= 0 { + return s * v.Len() + } + return -1 + } + return sizeof(v.Type()) +} + +// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable. +func sizeof(t reflect.Type) int { + switch t.Kind() { + case reflect.Array: + if s := sizeof(t.Elem()); s >= 0 { + return s * t.Len() + } + + case reflect.Struct: + sum := 0 + for i, n := 0, t.NumField(); i < n; i++ { + s := sizeof(t.Field(i).Type) + if s < 0 { + return -1 + } + sum += s + } + return sum + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.Ptr: + return int(t.Size()) + } + + return -1 +} + +type coder struct { + order ByteOrder + buf []byte +} + +type decoder coder +type encoder coder + +func (d *decoder) uint8() uint8 { + x := d.buf[0] + d.buf = d.buf[1:] + return x +} + +func (e *encoder) uint8(x uint8) { + e.buf[0] = x + e.buf = e.buf[1:] +} + +func (d *decoder) uint16() uint16 { + x := d.order.Uint16(d.buf[0:2]) + d.buf = d.buf[2:] + return x +} + +func (e *encoder) uint16(x uint16) { + e.order.PutUint16(e.buf[0:2], x) + e.buf = e.buf[2:] +} + +func (d *decoder) uint32() uint32 { + x := d.order.Uint32(d.buf[0:4]) + d.buf = d.buf[4:] + return x +} + +func (e *encoder) uint32(x uint32) { + e.order.PutUint32(e.buf[0:4], x) + e.buf = e.buf[4:] +} + +func (d *decoder) uint64() uint64 { + x := d.order.Uint64(d.buf[0:8]) + d.buf = d.buf[8:] + return x +} + +func (e *encoder) uint64(x uint64) { + e.order.PutUint64(e.buf[0:8], x) + e.buf = e.buf[8:] +} + +func (d *decoder) int8() int8 { return int8(d.uint8()) } + +func (e *encoder) int8(x int8) { e.uint8(uint8(x)) } + +func (d *decoder) int16() int16 { return int16(d.uint16()) } + +func (e *encoder) int16(x int16) { e.uint16(uint16(x)) } + +func (d *decoder) int32() int32 { return int32(d.uint32()) } + +func (e *encoder) int32(x int32) { e.uint32(uint32(x)) } + +func (d *decoder) int64() int64 { return int64(d.uint64()) } + +func (e *encoder) int64(x int64) { e.uint64(uint64(x)) } + +func (d *decoder) value(v reflect.Value) { + switch v.Kind() { + case reflect.Array: + l := v.Len() + for i := 0; i < l; i++ { + d.value(v.Index(i)) + } + + case reflect.Struct: + t := v.Type() + l := v.NumField() + for i := 0; i < l; i++ { + // Note: Calling v.CanSet() below is an optimization. 
+ // It would be sufficient to check the field name, + // but creating the StructField info for each field is + // costly (run "go test -bench=ReadStruct" and compare + // results when making changes to this code). + if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + d.value(v) + } else { + d.skip(v) + } + } + + case reflect.Slice: + l := v.Len() + for i := 0; i < l; i++ { + d.value(v.Index(i)) + } + + case reflect.Int8: + v.SetInt(int64(d.int8())) + case reflect.Int16: + v.SetInt(int64(d.int16())) + case reflect.Int32: + v.SetInt(int64(d.int32())) + case reflect.Int64: + v.SetInt(d.int64()) + + case reflect.Uint8: + v.SetUint(uint64(d.uint8())) + case reflect.Uint16: + v.SetUint(uint64(d.uint16())) + case reflect.Uint32: + v.SetUint(uint64(d.uint32())) + case reflect.Uint64: + v.SetUint(d.uint64()) + + case reflect.Float32: + v.SetFloat(float64(math.Float32frombits(d.uint32()))) + case reflect.Float64: + v.SetFloat(math.Float64frombits(d.uint64())) + + case reflect.Complex64: + v.SetComplex(complex( + float64(math.Float32frombits(d.uint32())), + float64(math.Float32frombits(d.uint32())), + )) + case reflect.Complex128: + v.SetComplex(complex( + math.Float64frombits(d.uint64()), + math.Float64frombits(d.uint64()), + )) + } +} + +func (e *encoder) value(v reflect.Value) { + switch v.Kind() { + case reflect.Array: + l := v.Len() + for i := 0; i < l; i++ { + e.value(v.Index(i)) + } + + case reflect.Struct: + t := v.Type() + l := v.NumField() + for i := 0; i < l; i++ { + // see comment for corresponding code in decoder.value() + if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + e.value(v) + } else { + e.skip(v) + } + } + + case reflect.Slice: + l := v.Len() + for i := 0; i < l; i++ { + e.value(v.Index(i)) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Type().Kind() { + case reflect.Int8: + e.int8(int8(v.Int())) + case reflect.Int16: + e.int16(int16(v.Int())) + case reflect.Int32: + e.int32(int32(v.Int())) + case reflect.Int64: + e.int64(v.Int()) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch v.Type().Kind() { + case reflect.Uint8: + e.uint8(uint8(v.Uint())) + case reflect.Uint16: + e.uint16(uint16(v.Uint())) + case reflect.Uint32: + e.uint32(uint32(v.Uint())) + case reflect.Uint64: + e.uint64(v.Uint()) + } + + case reflect.Float32, reflect.Float64: + switch v.Type().Kind() { + case reflect.Float32: + e.uint32(math.Float32bits(float32(v.Float()))) + case reflect.Float64: + e.uint64(math.Float64bits(v.Float())) + } + + case reflect.Complex64, reflect.Complex128: + switch v.Type().Kind() { + case reflect.Complex64: + x := v.Complex() + e.uint32(math.Float32bits(float32(real(x)))) + e.uint32(math.Float32bits(float32(imag(x)))) + case reflect.Complex128: + x := v.Complex() + e.uint64(math.Float64bits(real(x))) + e.uint64(math.Float64bits(imag(x))) + } + } +} + +func (d *decoder) skip(v reflect.Value) { + d.buf = d.buf[dataSize(v):] +} + +func (e *encoder) skip(v reflect.Value) { + n := dataSize(v) + for i := range e.buf[0:n] { + e.buf[i] = 0 + } + e.buf = e.buf[n:] +} + +// intDataSize returns the size of the data required to represent the data when encoded. +// It returns zero if the type cannot be implemented by the fast path in Read or Write. 
+func intDataSize(data interface{}) int { + switch data := data.(type) { + case int8, *int8, *uint8: + return 1 + case []int8: + return len(data) + case []uint8: + return len(data) + case int16, *int16, *uint16: + return 2 + case []int16: + return 2 * len(data) + case []uint16: + return 2 * len(data) + case int32, *int32, *uint32: + return 4 + case []int32: + return 4 * len(data) + case []uint32: + return 4 * len(data) + case int64, *int64, *uint64: + return 8 + case []int64: + return 8 * len(data) + case []uint64: + return 8 * len(data) + } + return 0 +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common.go b/vendor/github.com/shirou/gopsutil/internal/common/common.go new file mode 100644 index 0000000..71c2257 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common.go @@ -0,0 +1,392 @@ +package common + +// +// gopsutil is a port of psutil(http://pythonhosted.org/psutil/). +// This covers these architectures. +// - linux (amd64, arm) +// - freebsd (amd64) +// - windows (amd64) +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" +) + +var ( + Timeout = 3 * time.Second + ErrTimeout = errors.New("command timed out") +) + +type Invoker interface { + Command(string, ...string) ([]byte, error) + CommandWithContext(context.Context, string, ...string) ([]byte, error) +} + +type Invoke struct{} + +func (i Invoke) Command(name string, arg ...string) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), Timeout) + defer cancel() + return i.CommandWithContext(ctx, name, arg...) +} + +func (i Invoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { + cmd := exec.CommandContext(ctx, name, arg...) + + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + if err := cmd.Start(); err != nil { + return buf.Bytes(), err + } + + if err := cmd.Wait(); err != nil { + return buf.Bytes(), err + } + + return buf.Bytes(), nil +} + +type FakeInvoke struct { + Suffix string // Suffix species expected file name suffix such as "fail" + Error error // If Error specfied, return the error. +} + +// Command in FakeInvoke returns from expected file if exists. +func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { + if i.Error != nil { + return []byte{}, i.Error + } + + arch := runtime.GOOS + + commandName := filepath.Base(name) + + fname := strings.Join(append([]string{commandName}, arg...), "") + fname = url.QueryEscape(fname) + fpath := path.Join("testdata", arch, fname) + if i.Suffix != "" { + fpath += "_" + i.Suffix + } + if PathExists(fpath) { + return ioutil.ReadFile(fpath) + } + return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) +} + +func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { + return i.Command(name, arg...) +} + +var ErrNotImplementedError = errors.New("not implemented yet") + +// ReadLines reads contents from a file and splits them by new lines. +// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). +func ReadLines(filename string) ([]string, error) { + return ReadLinesOffsetN(filename, 0, -1) +} + +// ReadLines reads contents from file and splits them by new line. +// The offset tells at which line number to start. 
+// The count determines the number of lines to read (starting from offset): +// n >= 0: at most n lines +// n < 0: whole file +func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { + f, err := os.Open(filename) + if err != nil { + return []string{""}, err + } + defer f.Close() + + var ret []string + + r := bufio.NewReader(f) + for i := 0; i < n+int(offset) || n < 0; i++ { + line, err := r.ReadString('\n') + if err != nil { + break + } + if i < int(offset) { + continue + } + ret = append(ret, strings.Trim(line, "\n")) + } + + return ret, nil +} + +func IntToString(orig []int8) string { + ret := make([]byte, len(orig)) + size := -1 + for i, o := range orig { + if o == 0 { + size = i + break + } + ret[i] = byte(o) + } + if size == -1 { + size = len(orig) + } + + return string(ret[0:size]) +} + +func UintToString(orig []uint8) string { + ret := make([]byte, len(orig)) + size := -1 + for i, o := range orig { + if o == 0 { + size = i + break + } + ret[i] = byte(o) + } + if size == -1 { + size = len(orig) + } + + return string(ret[0:size]) +} + +func ByteToString(orig []byte) string { + n := -1 + l := -1 + for i, b := range orig { + // skip left side null + if l == -1 && b == 0 { + continue + } + if l == -1 { + l = i + } + + if b == 0 { + break + } + n = i + 1 + } + if n == -1 { + return string(orig) + } + return string(orig[l:n]) +} + +// ReadInts reads contents from single line file and returns them as []int32. +func ReadInts(filename string) ([]int64, error) { + f, err := os.Open(filename) + if err != nil { + return []int64{}, err + } + defer f.Close() + + var ret []int64 + + r := bufio.NewReader(f) + + // The int files that this is concerned with should only be one liners. + line, err := r.ReadString('\n') + if err != nil { + return []int64{}, err + } + + i, err := strconv.ParseInt(strings.Trim(line, "\n"), 10, 32) + if err != nil { + return []int64{}, err + } + ret = append(ret, i) + + return ret, nil +} + +// Parse to int32 without error +func mustParseInt32(val string) int32 { + vv, _ := strconv.ParseInt(val, 10, 32) + return int32(vv) +} + +// Parse to uint64 without error +func mustParseUint64(val string) uint64 { + vv, _ := strconv.ParseInt(val, 10, 64) + return uint64(vv) +} + +// Parse to Float64 without error +func mustParseFloat64(val string) float64 { + vv, _ := strconv.ParseFloat(val, 64) + return vv +} + +// StringsHas checks the target string slice contains src or not +func StringsHas(target []string, src string) bool { + for _, t := range target { + if strings.TrimSpace(t) == src { + return true + } + } + return false +} + +// StringsContains checks the src in any string of the target string slice +func StringsContains(target []string, src string) bool { + for _, t := range target { + if strings.Contains(t, src) { + return true + } + } + return false +} + +// IntContains checks the src in any int of the target int slice. +func IntContains(target []int, src int) bool { + for _, t := range target { + if src == t { + return true + } + } + return false +} + +// get struct attributes. +// This method is used only for debugging platform dependent code. 
+func attributes(m interface{}) map[string]reflect.Type { + typ := reflect.TypeOf(m) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + attrs := make(map[string]reflect.Type) + if typ.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < typ.NumField(); i++ { + p := typ.Field(i) + if !p.Anonymous { + attrs[p.Name] = p.Type + } + } + + return attrs +} + +func PathExists(filename string) bool { + if _, err := os.Stat(filename); err == nil { + return true + } + return false +} + +//GetEnv retrieves the environment variable key. If it does not exist it returns the default. +func GetEnv(key string, dfault string, combineWith ...string) string { + value := os.Getenv(key) + if value == "" { + value = dfault + } + + switch len(combineWith) { + case 0: + return value + case 1: + return filepath.Join(value, combineWith[0]) + default: + all := make([]string, len(combineWith)+1) + all[0] = value + copy(all[1:], combineWith) + return filepath.Join(all...) + } + panic("invalid switch case") +} + +func HostProc(combineWith ...string) string { + return GetEnv("HOST_PROC", "/proc", combineWith...) +} + +func HostSys(combineWith ...string) string { + return GetEnv("HOST_SYS", "/sys", combineWith...) +} + +func HostEtc(combineWith ...string) string { + return GetEnv("HOST_ETC", "/etc", combineWith...) +} + +func HostVar(combineWith ...string) string { + return GetEnv("HOST_VAR", "/var", combineWith...) +} + +func HostRun(combineWith ...string) string { + return GetEnv("HOST_RUN", "/run", combineWith...) +} + +// https://gist.github.com/kylelemons/1525278 +func Pipeline(cmds ...*exec.Cmd) ([]byte, []byte, error) { + // Require at least one command + if len(cmds) < 1 { + return nil, nil, nil + } + + // Collect the output from the command(s) + var output bytes.Buffer + var stderr bytes.Buffer + + last := len(cmds) - 1 + for i, cmd := range cmds[:last] { + var err error + // Connect each command's stdin to the previous command's stdout + if cmds[i+1].Stdin, err = cmd.StdoutPipe(); err != nil { + return nil, nil, err + } + // Connect each command's stderr to a buffer + cmd.Stderr = &stderr + } + + // Connect the output and error for the last command + cmds[last].Stdout, cmds[last].Stderr = &output, &stderr + + // Start each command + for _, cmd := range cmds { + if err := cmd.Start(); err != nil { + return output.Bytes(), stderr.Bytes(), err + } + } + + // Wait for each command to complete + for _, cmd := range cmds { + if err := cmd.Wait(); err != nil { + return output.Bytes(), stderr.Bytes(), err + } + } + + // Return the pipeline output and the collected standard error + return output.Bytes(), stderr.Bytes(), nil +} + +// getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running +// sysctl commands (see DoSysctrl). 
+func getSysctrlEnv(env []string) []string { + foundLC := false + for i, line := range env { + if strings.HasPrefix(line, "LC_ALL") { + env[i] = "LC_ALL=C" + foundLC = true + } + } + if !foundLC { + env = append(env, "LC_ALL=C") + } + return env +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go new file mode 100644 index 0000000..3e85cc0 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_darwin.go @@ -0,0 +1,69 @@ +// +build darwin + +package common + +import ( + "context" + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { + sysctl, err := exec.LookPath("/usr/sbin/sysctl") + if err != nil { + return []string{}, err + } + cmd := exec.CommandContext(ctx, sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go new file mode 100644 index 0000000..107e2c9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_freebsd.go @@ -0,0 +1,69 @@ +// +build freebsd openbsd + +package common + +import ( + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrl(mib string) ([]string, error) { + sysctl, err := exec.LookPath("/sbin/sysctl") + if err != nil { + return []string{}, err + } + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} 
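The platform-specific host collectors and the internal/common helpers vendored above all feed the same portable API surface (Info, BootTime, Uptime, and friends). As a point of reference, a minimal consumer of that public API could look like the sketch below; the program layout and output formatting are illustrative only and are not part of the vendored code.

package main

import (
	"fmt"
	"time"

	"github.com/shirou/gopsutil/host"
)

func main() {
	// Info fans out to the per-OS implementation compiled for this platform
	// (host_openbsd.go, host_solaris.go, host_windows.go, ...).
	info, err := host.Info()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %s (kernel %s), up %s\n",
		info.Platform, info.PlatformVersion, info.KernelVersion,
		time.Duration(info.Uptime)*time.Second)

	// BootTime is reported as Unix seconds; on Windows it is derived from the
	// tick count and cached, on the BSDs it comes from kern.boottime.
	if boot, err := host.BootTime(); err == nil {
		fmt.Println("booted at", time.Unix(int64(boot), 0).UTC())
	}
}

Anything not implemented for a given platform simply returns common.ErrNotImplementedError, so callers are expected to treat these fields as best-effort.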
diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go new file mode 100644 index 0000000..4e829e0 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_linux.go @@ -0,0 +1,41 @@ +// +build linux + +package common + +import ( + "os" + "os/exec" + "strings" +) + +func DoSysctrl(mib string) ([]string, error) { + sysctl, err := exec.LookPath("/sbin/sysctl") + if err != nil { + return []string{}, err + } + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func NumProcs() (uint64, error) { + f, err := os.Open(HostProc()) + if err != nil { + return 0, err + } + defer f.Close() + + list, err := f.Readdirnames(-1) + if err != nil { + return 0, err + } + return uint64(len(list)), err +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go new file mode 100644 index 0000000..398f785 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_openbsd.go @@ -0,0 +1,69 @@ +// +build openbsd + +package common + +import ( + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrl(mib string) ([]string, error) { + sysctl, err := exec.LookPath("/sbin/sysctl") + if err != nil { + return []string{}, err + } + cmd := exec.Command(sysctl, "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/internal/common/common_unix.go new file mode 100644 index 0000000..750a592 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_unix.go @@ -0,0 +1,67 @@ +// +build linux freebsd darwin openbsd + +package common + +import ( + "context" + "os/exec" + "strconv" + "strings" +) + +func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args ...string) ([]string, error) { + var cmd []string + if pid == 0 { // will get from all processes. + cmd = []string{"-a", "-n", "-P"} + } else { + cmd = []string{"-a", "-n", "-P", "-p", strconv.Itoa(int(pid))} + } + cmd = append(cmd, args...) 
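+ // lsof is resolved from PATH rather than a fixed location; a missing binary
+ // surfaces as the LookPath error below, while an empty result with exit
+ // status 1 is treated as "no matching files" further down.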
+ lsof, err := exec.LookPath("lsof") + if err != nil { + return []string{}, err + } + out, err := invoke.CommandWithContext(ctx, lsof, cmd...) + if err != nil { + // if no pid found, lsof returnes code 1. + if err.Error() == "exit status 1" && len(out) == 0 { + return []string{}, nil + } + } + lines := strings.Split(string(out), "\n") + + var ret []string + for _, l := range lines[1:] { + if len(l) == 0 { + continue + } + ret = append(ret, l) + } + return ret, nil +} + +func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) { + var cmd []string + cmd = []string{"-P", strconv.Itoa(int(pid))} + pgrep, err := exec.LookPath("pgrep") + if err != nil { + return []int32{}, err + } + out, err := invoke.CommandWithContext(ctx, pgrep, cmd...) + if err != nil { + return []int32{}, err + } + lines := strings.Split(string(out), "\n") + ret := make([]int32, 0, len(lines)) + for _, l := range lines { + if len(l) == 0 { + continue + } + i, err := strconv.Atoi(l) + if err != nil { + continue + } + ret = append(ret, int32(i)) + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go new file mode 100644 index 0000000..0997c9b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/internal/common/common_windows.go @@ -0,0 +1,135 @@ +// +build windows + +package common + +import ( + "context" + "unsafe" + + "github.com/StackExchange/wmi" + "golang.org/x/sys/windows" +) + +// for double values +type PDH_FMT_COUNTERVALUE_DOUBLE struct { + CStatus uint32 + DoubleValue float64 +} + +// for 64 bit integer values +type PDH_FMT_COUNTERVALUE_LARGE struct { + CStatus uint32 + LargeValue int64 +} + +// for long values +type PDH_FMT_COUNTERVALUE_LONG struct { + CStatus uint32 + LongValue int32 + padding [4]byte +} + +// windows system const +const ( + ERROR_SUCCESS = 0 + ERROR_FILE_NOT_FOUND = 2 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + HKEY_LOCAL_MACHINE = 0x80000002 + RRF_RT_REG_SZ = 0x00000002 + RRF_RT_REG_DWORD = 0x00000010 + PDH_FMT_LONG = 0x00000100 + PDH_FMT_DOUBLE = 0x00000200 + PDH_FMT_LARGE = 0x00000400 + PDH_INVALID_DATA = 0xc0000bc6 + PDH_INVALID_HANDLE = 0xC0000bbc + PDH_NO_DATA = 0x800007d5 +) + +var ( + Modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + ModNt = windows.NewLazySystemDLL("ntdll.dll") + ModPdh = windows.NewLazySystemDLL("pdh.dll") + ModPsapi = windows.NewLazySystemDLL("psapi.dll") + + ProcGetSystemTimes = Modkernel32.NewProc("GetSystemTimes") + ProcNtQuerySystemInformation = ModNt.NewProc("NtQuerySystemInformation") + PdhOpenQuery = ModPdh.NewProc("PdhOpenQuery") + PdhAddCounter = ModPdh.NewProc("PdhAddCounterW") + PdhCollectQueryData = ModPdh.NewProc("PdhCollectQueryData") + PdhGetFormattedCounterValue = ModPdh.NewProc("PdhGetFormattedCounterValue") + PdhCloseQuery = ModPdh.NewProc("PdhCloseQuery") +) + +type FILETIME struct { + DwLowDateTime uint32 + DwHighDateTime uint32 +} + +// borrowed from net/interface_windows.go +func BytePtrToString(p *uint8) string { + a := (*[10000]uint8)(unsafe.Pointer(p)) + i := 0 + for a[i] != 0 { + i++ + } + return string(a[:i]) +} + +// CounterInfo +// copied from https://github.com/mackerelio/mackerel-agent/ +type CounterInfo struct { + PostName string + CounterName string + Counter windows.Handle +} + +// CreateQuery XXX +// copied from https://github.com/mackerelio/mackerel-agent/ +func CreateQuery() (windows.Handle, error) { + var query windows.Handle + r, _, err := PdhOpenQuery.Call(0, 0, 
uintptr(unsafe.Pointer(&query))) + if r != 0 { + return 0, err + } + return query, nil +} + +// CreateCounter XXX +func CreateCounter(query windows.Handle, pname, cname string) (*CounterInfo, error) { + var counter windows.Handle + r, _, err := PdhAddCounter.Call( + uintptr(query), + uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(cname))), + 0, + uintptr(unsafe.Pointer(&counter))) + if r != 0 { + return nil, err + } + return &CounterInfo{ + PostName: pname, + CounterName: cname, + Counter: counter, + }, nil +} + +// WMIQueryWithContext - wraps wmi.Query with a timed-out context to avoid hanging +func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, connectServerArgs ...interface{}) error { + if _, ok := ctx.Deadline(); !ok { + ctxTimeout, cancel := context.WithTimeout(ctx, Timeout) + defer cancel() + ctx = ctxTimeout + } + + errChan := make(chan error, 1) + go func() { + errChan <- wmi.Query(query, dst, connectServerArgs...) + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-errChan: + return err + } +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem.go b/vendor/github.com/shirou/gopsutil/mem/mem.go new file mode 100644 index 0000000..995364f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem.go @@ -0,0 +1,97 @@ +package mem + +import ( + "encoding/json" + + "github.com/shirou/gopsutil/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +// Memory usage statistics. Total, Available and Used contain numbers of bytes +// for human consumption. +// +// The other fields in this struct contain kernel specific values. +type VirtualMemoryStat struct { + // Total amount of RAM on this system + Total uint64 `json:"total"` + + // RAM available for programs to allocate + // + // This value is computed from the kernel specific values. + Available uint64 `json:"available"` + + // RAM used by programs + // + // This value is computed from the kernel specific values. + Used uint64 `json:"used"` + + // Percentage of RAM used by programs + // + // This value is computed from the kernel specific values. + UsedPercent float64 `json:"usedPercent"` + + // This is the kernel's notion of free memory; RAM chips whose bits nobody + // cares about the value of right now. For a human consumable number, + // Available is what you really want. 
+ Free uint64 `json:"free"` + + // OS X / BSD specific numbers: + // http://www.macyourself.com/2010/02/17/what-is-free-wired-active-and-inactive-system-memory-ram/ + Active uint64 `json:"active"` + Inactive uint64 `json:"inactive"` + Wired uint64 `json:"wired"` + + // FreeBSD specific numbers: + // https://reviews.freebsd.org/D8467 + Laundry uint64 `json:"laundry"` + + // Linux specific numbers + // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html + // https://www.kernel.org/doc/Documentation/filesystems/proc.txt + // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting + Buffers uint64 `json:"buffers"` + Cached uint64 `json:"cached"` + Writeback uint64 `json:"writeback"` + Dirty uint64 `json:"dirty"` + WritebackTmp uint64 `json:"writebacktmp"` + Shared uint64 `json:"shared"` + Slab uint64 `json:"slab"` + SReclaimable uint64 `json:"sreclaimable"` + PageTables uint64 `json:"pagetables"` + SwapCached uint64 `json:"swapcached"` + CommitLimit uint64 `json:"commitlimit"` + CommittedAS uint64 `json:"committedas"` + HighTotal uint64 `json:"hightotal"` + HighFree uint64 `json:"highfree"` + LowTotal uint64 `json:"lowtotal"` + LowFree uint64 `json:"lowfree"` + SwapTotal uint64 `json:"swaptotal"` + SwapFree uint64 `json:"swapfree"` + Mapped uint64 `json:"mapped"` + VMallocTotal uint64 `json:"vmalloctotal"` + VMallocUsed uint64 `json:"vmallocused"` + VMallocChunk uint64 `json:"vmallocchunk"` + HugePagesTotal uint64 `json:"hugepagestotal"` + HugePagesFree uint64 `json:"hugepagesfree"` + HugePageSize uint64 `json:"hugepagesize"` +} + +type SwapMemoryStat struct { + Total uint64 `json:"total"` + Used uint64 `json:"used"` + Free uint64 `json:"free"` + UsedPercent float64 `json:"usedPercent"` + Sin uint64 `json:"sin"` + Sout uint64 `json:"sout"` +} + +func (m VirtualMemoryStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +func (m SwapMemoryStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/mem/mem_darwin.go new file mode 100644 index 0000000..4fe7009 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_darwin.go @@ -0,0 +1,74 @@ +// +build darwin + +package mem + +import ( + "context" + "encoding/binary" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +func getHwMemsize() (uint64, error) { + totalString, err := unix.Sysctl("hw.memsize") + if err != nil { + return 0, err + } + + // unix.sysctl() helpfully assumes the result is a null-terminated string and + // removes the last byte of the result if it's 0 :/ + totalString += "\x00" + + total := uint64(binary.LittleEndian.Uint64([]byte(totalString))) + + return total, nil +} + +// SwapMemory returns swapinfo. 
+func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + var ret *SwapMemoryStat + + swapUsage, err := common.DoSysctrlWithContext(ctx, "vm.swapusage") + if err != nil { + return ret, err + } + + total := strings.Replace(swapUsage[2], "M", "", 1) + used := strings.Replace(swapUsage[5], "M", "", 1) + free := strings.Replace(swapUsage[8], "M", "", 1) + + total_v, err := strconv.ParseFloat(total, 64) + if err != nil { + return nil, err + } + used_v, err := strconv.ParseFloat(used, 64) + if err != nil { + return nil, err + } + free_v, err := strconv.ParseFloat(free, 64) + if err != nil { + return nil, err + } + + u := float64(0) + if total_v != 0 { + u = ((total_v - free_v) / total_v) * 100.0 + } + + // vm.swapusage shows "M", multiply 1024 * 1024 to convert bytes. + ret = &SwapMemoryStat{ + Total: uint64(total_v * 1024 * 1024), + Used: uint64(used_v * 1024 * 1024), + Free: uint64(free_v * 1024 * 1024), + UsedPercent: u, + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/mem/mem_darwin_cgo.go new file mode 100644 index 0000000..389f8cd --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_darwin_cgo.go @@ -0,0 +1,59 @@ +// +build darwin +// +build cgo + +package mem + +/* +#include +*/ +import "C" + +import ( + "context" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" +) + +// VirtualMemory returns VirtualmemoryStat. +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT) + var vmstat C.vm_statistics_data_t + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_VM_INFO, + C.host_info_t(unsafe.Pointer(&vmstat)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + pageSize := uint64(unix.Getpagesize()) + total, err := getHwMemsize() + if err != nil { + return nil, err + } + totalCount := C.natural_t(total / pageSize) + + availableCount := vmstat.inactive_count + vmstat.free_count + usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) + + usedCount := totalCount - availableCount + + return &VirtualMemoryStat{ + Total: total, + Available: pageSize * uint64(availableCount), + Used: pageSize * uint64(usedCount), + UsedPercent: usedPercent, + Free: pageSize * uint64(vmstat.free_count), + Active: pageSize * uint64(vmstat.active_count), + Inactive: pageSize * uint64(vmstat.inactive_count), + Wired: pageSize * uint64(vmstat.wire_count), + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/mem/mem_darwin_nocgo.go new file mode 100644 index 0000000..dd7c2e6 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_darwin_nocgo.go @@ -0,0 +1,94 @@ +// +build darwin +// +build !cgo + +package mem + +import ( + "context" + "os/exec" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// Runs vm_stat and returns Free and inactive pages +func getVMStat(vms *VirtualMemoryStat) error { + vm_stat, err := exec.LookPath("vm_stat") + if err != nil { + return err + } + out, err := invoke.Command(vm_stat) + if err != nil { + return err + } + return parseVMStat(string(out), vms) +} + +func parseVMStat(out string, vms *VirtualMemoryStat) error 
{ + var err error + + lines := strings.Split(out, "\n") + pagesize := uint64(unix.Getpagesize()) + for _, line := range lines { + fields := strings.Split(line, ":") + if len(fields) < 2 { + continue + } + key := strings.TrimSpace(fields[0]) + value := strings.Trim(fields[1], " .") + switch key { + case "Pages free": + free, e := strconv.ParseUint(value, 10, 64) + if e != nil { + err = e + } + vms.Free = free * pagesize + case "Pages inactive": + inactive, e := strconv.ParseUint(value, 10, 64) + if e != nil { + err = e + } + vms.Inactive = inactive * pagesize + case "Pages active": + active, e := strconv.ParseUint(value, 10, 64) + if e != nil { + err = e + } + vms.Active = active * pagesize + case "Pages wired down": + wired, e := strconv.ParseUint(value, 10, 64) + if e != nil { + err = e + } + vms.Wired = wired * pagesize + } + } + return err +} + +// VirtualMemory returns VirtualmemoryStat. +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + ret := &VirtualMemoryStat{} + + total, err := getHwMemsize() + if err != nil { + return nil, err + } + err = getVMStat(ret) + if err != nil { + return nil, err + } + + ret.Available = ret.Free + ret.Inactive + ret.Total = total + + ret.Used = ret.Total - ret.Available + ret.UsedPercent = 100 * float64(ret.Used) / float64(ret.Total) + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/mem/mem_fallback.go new file mode 100644 index 0000000..2a0fd45 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_fallback.go @@ -0,0 +1,25 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows + +package mem + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + return nil, common.ErrNotImplementedError +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/mem/mem_freebsd.go new file mode 100644 index 0000000..a792281 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_freebsd.go @@ -0,0 +1,142 @@ +// +build freebsd + +package mem + +import ( + "context" + "errors" + "unsafe" + + "golang.org/x/sys/unix" +) + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + pageSize, err := unix.SysctlUint32("vm.stats.vm.v_page_size") + if err != nil { + return nil, err + } + physmem, err := unix.SysctlUint64("hw.physmem") + if err != nil { + return nil, err + } + free, err := unix.SysctlUint32("vm.stats.vm.v_free_count") + if err != nil { + return nil, err + } + active, err := unix.SysctlUint32("vm.stats.vm.v_active_count") + if err != nil { + return nil, err + } + inactive, err := unix.SysctlUint32("vm.stats.vm.v_inactive_count") + if err != nil { + return nil, err + } + buffers, err := unix.SysctlUint64("vfs.bufspace") + if err != nil { + return nil, err + } + wired, err := 
unix.SysctlUint32("vm.stats.vm.v_wire_count") + if err != nil { + return nil, err + } + var cached, laundry uint32 + osreldate, _ := unix.SysctlUint32("kern.osreldate") + if osreldate < 1102000 { + cached, err = unix.SysctlUint32("vm.stats.vm.v_cache_count") + if err != nil { + return nil, err + } + } else { + laundry, err = unix.SysctlUint32("vm.stats.vm.v_laundry_count") + if err != nil { + return nil, err + } + } + + p := uint64(pageSize) + ret := &VirtualMemoryStat{ + Total: uint64(physmem), + Free: uint64(free) * p, + Active: uint64(active) * p, + Inactive: uint64(inactive) * p, + Cached: uint64(cached) * p, + Buffers: uint64(buffers), + Wired: uint64(wired) * p, + Laundry: uint64(laundry) * p, + } + + ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Laundry + ret.Used = ret.Total - ret.Available + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + return ret, nil +} + +// Return swapinfo +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +// Constants from vm/vm_param.h +// nolint: golint +const ( + XSWDEV_VERSION = 1 +) + +// Types from vm/vm_param.h +type xswdev struct { + Version uint32 // Version is the version + Dev uint32 // Dev is the device identifier + Flags int32 // Flags is the swap flags applied to the device + NBlks int32 // NBlks is the total number of blocks + Used int32 // Used is the number of blocks used +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + // FreeBSD can have multiple swap devices so we total them up + i, err := unix.SysctlUint32("vm.nswapdev") + if err != nil { + return nil, err + } + + if i == 0 { + return nil, errors.New("no swap devices found") + } + + c := int(i) + + i, err = unix.SysctlUint32("vm.stats.vm.v_page_size") + if err != nil { + return nil, err + } + pageSize := uint64(i) + + var buf []byte + s := &SwapMemoryStat{} + for n := 0; n < c; n++ { + buf, err = unix.SysctlRaw("vm.swap_info", n) + if err != nil { + return nil, err + } + + xsw := (*xswdev)(unsafe.Pointer(&buf[0])) + if xsw.Version != XSWDEV_VERSION { + return nil, errors.New("xswdev version mismatch") + } + s.Total += uint64(xsw.NBlks) + s.Used += uint64(xsw.Used) + } + + if s.Total != 0 { + s.UsedPercent = float64(s.Used) / float64(s.Total) * 100 + } + s.Total *= pageSize + s.Used *= pageSize + s.Free = s.Total - s.Used + + return s, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/mem/mem_linux.go new file mode 100644 index 0000000..a3425d7 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_linux.go @@ -0,0 +1,158 @@ +// +build linux + +package mem + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/unix" +) + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + filename := common.HostProc("meminfo") + lines, _ := common.ReadLines(filename) + // flag if MemAvailable is in /proc/meminfo (kernel 3.14+) + memavail := false + + ret := &VirtualMemoryStat{} + for _, line := range lines { + fields := strings.Split(line, ":") + if len(fields) != 2 { + continue + } + key := strings.TrimSpace(fields[0]) + value := strings.TrimSpace(fields[1]) + value = strings.Replace(value, " kB", "", -1) + + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, err + } + switch key { + case 
"MemTotal": + ret.Total = t * 1024 + case "MemFree": + ret.Free = t * 1024 + case "MemAvailable": + memavail = true + ret.Available = t * 1024 + case "Buffers": + ret.Buffers = t * 1024 + case "Cached": + ret.Cached = t * 1024 + case "Active": + ret.Active = t * 1024 + case "Inactive": + ret.Inactive = t * 1024 + case "Writeback": + ret.Writeback = t * 1024 + case "WritebackTmp": + ret.WritebackTmp = t * 1024 + case "Dirty": + ret.Dirty = t * 1024 + case "Shmem": + ret.Shared = t * 1024 + case "Slab": + ret.Slab = t * 1024 + case "SReclaimable": + ret.SReclaimable = t * 1024 + case "PageTables": + ret.PageTables = t * 1024 + case "SwapCached": + ret.SwapCached = t * 1024 + case "CommitLimit": + ret.CommitLimit = t * 1024 + case "Committed_AS": + ret.CommittedAS = t * 1024 + case "HighTotal": + ret.HighTotal = t * 1024 + case "HighFree": + ret.HighFree = t * 1024 + case "LowTotal": + ret.LowTotal = t * 1024 + case "LowFree": + ret.LowFree = t * 1024 + case "SwapTotal": + ret.SwapTotal = t * 1024 + case "SwapFree": + ret.SwapFree = t * 1024 + case "Mapped": + ret.Mapped = t * 1024 + case "VmallocTotal": + ret.VMallocTotal = t * 1024 + case "VmallocUsed": + ret.VMallocUsed = t * 1024 + case "VmallocChunk": + ret.VMallocChunk = t * 1024 + case "HugePages_Total": + ret.HugePagesTotal = t + case "HugePages_Free": + ret.HugePagesFree = t + case "Hugepagesize": + ret.HugePageSize = t * 1024 + } + } + + ret.Cached += ret.SReclaimable + + if !memavail { + ret.Available = ret.Free + ret.Buffers + ret.Cached + } + ret.Used = ret.Total - ret.Free - ret.Buffers - ret.Cached + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + return ret, nil +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + sysinfo := &unix.Sysinfo_t{} + + if err := unix.Sysinfo(sysinfo); err != nil { + return nil, err + } + ret := &SwapMemoryStat{ + Total: uint64(sysinfo.Totalswap) * uint64(sysinfo.Unit), + Free: uint64(sysinfo.Freeswap) * uint64(sysinfo.Unit), + } + ret.Used = ret.Total - ret.Free + //check Infinity + if ret.Total != 0 { + ret.UsedPercent = float64(ret.Total-ret.Free) / float64(ret.Total) * 100.0 + } else { + ret.UsedPercent = 0 + } + filename := common.HostProc("vmstat") + lines, _ := common.ReadLines(filename) + for _, l := range lines { + fields := strings.Fields(l) + if len(fields) < 2 { + continue + } + switch fields[0] { + case "pswpin": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.Sin = value * 4 * 1024 + case "pswpout": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.Sout = value * 4 * 1024 + } + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/mem/mem_openbsd.go new file mode 100644 index 0000000..35472a3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_openbsd.go @@ -0,0 +1,124 @@ +// +build openbsd + +package mem + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "os/exec" + + "github.com/shirou/gopsutil/internal/common" +) + +func GetPageSize() (uint64, error) { + return GetPageSizeWithContext(context.Background()) +} + +func GetPageSizeWithContext(ctx context.Context) (uint64, error) { + mib := []int32{CTLVm, VmUvmexp} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return 0, err + } + if length < sizeOfUvmexp { + return 0, 
fmt.Errorf("short syscall ret %d bytes", length) + } + var uvmexp Uvmexp + br := bytes.NewReader(buf) + err = common.Read(br, binary.LittleEndian, &uvmexp) + if err != nil { + return 0, err + } + return uint64(uvmexp.Pagesize), nil +} + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + mib := []int32{CTLVm, VmUvmexp} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + if length < sizeOfUvmexp { + return nil, fmt.Errorf("short syscall ret %d bytes", length) + } + var uvmexp Uvmexp + br := bytes.NewReader(buf) + err = common.Read(br, binary.LittleEndian, &uvmexp) + if err != nil { + return nil, err + } + p := uint64(uvmexp.Pagesize) + + ret := &VirtualMemoryStat{ + Total: uint64(uvmexp.Npages) * p, + Free: uint64(uvmexp.Free) * p, + Active: uint64(uvmexp.Active) * p, + Inactive: uint64(uvmexp.Inactive) * p, + Cached: 0, // not available + Wired: uint64(uvmexp.Wired) * p, + } + + ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Used = ret.Total - ret.Available + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + mib = []int32{CTLVfs, VfsGeneric, VfsBcacheStat} + buf, length, err = common.CallSyscall(mib) + if err != nil { + return nil, err + } + if length < sizeOfBcachestats { + return nil, fmt.Errorf("short syscall ret %d bytes", length) + } + var bcs Bcachestats + br = bytes.NewReader(buf) + err = common.Read(br, binary.LittleEndian, &bcs) + if err != nil { + return nil, err + } + ret.Buffers = uint64(bcs.Numbufpages) * p + + return ret, nil +} + +// Return swapctl summary info +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + swapctl, err := exec.LookPath("swapctl") + if err != nil { + return nil, err + } + + out, err := invoke.CommandWithContext(ctx, swapctl, "-sk") + if err != nil { + return &SwapMemoryStat{}, nil + } + + line := string(out) + var total, used, free uint64 + + _, err = fmt.Sscanf(line, + "total: %d 1K-blocks allocated, %d used, %d available", + &total, &used, &free) + if err != nil { + return nil, errors.New("failed to parse swapctl output") + } + + percent := float64(used) / float64(total) * 100 + return &SwapMemoryStat{ + Total: total * 1024, + Used: used * 1024, + Free: free * 1024, + UsedPercent: percent, + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/mem/mem_openbsd_amd64.go new file mode 100644 index 0000000..e09b908 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_openbsd_amd64.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package mem + +const ( + CTLVm = 2 + CTLVfs = 10 + VmUvmexp = 4 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfUvmexp = 0x154 + sizeOfBcachestats = 0x78 +) + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Anonpages int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 
+ Swpgonly int32 + Nswget int32 + Nanon int32 + Nanonneeded int32 + Nfreeanon int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Obsolete_swapins int32 + Obsolete_swapouts int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Zeroaborts int32 + Fltnoram int32 + Fltnoanon int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Pdreanon int32 + Pdrevnode int32 + Pdrevtext int32 + Fpswtch int32 + Kmapent int32 +} +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/mem/mem_solaris.go new file mode 100644 index 0000000..0736bc4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_solaris.go @@ -0,0 +1,121 @@ +package mem + +import ( + "context" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +// VirtualMemory for Solaris is a minimal implementation which only returns +// what Nomad needs. It does take into account global vs zone, however. 
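// For reference, the helpers below expect command output of roughly the
// following shape (illustrative, assumed formats; exact output varies by
// Solaris/illumos release):
//
//	prtconf (global zone): a line matching "memory size: <N> Megabytes"
//	kstat -p (non-global zone): "memory_cap:0:<zonename>:physcap  <bytes>"
//
// where <N> is megabytes of physical memory and <bytes> is the zone's
// physical memory cap in bytes.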
+func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + result := &VirtualMemoryStat{} + + zoneName, err := zoneName() + if err != nil { + return nil, err + } + + if zoneName == "global" { + cap, err := globalZoneMemoryCapacity() + if err != nil { + return nil, err + } + result.Total = cap + } else { + cap, err := nonGlobalZoneMemoryCapacity() + if err != nil { + return nil, err + } + result.Total = cap + } + + return result, nil +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + return nil, common.ErrNotImplementedError +} + +func zoneName() (string, error) { + zonename, err := exec.LookPath("/usr/bin/zonename") + if err != nil { + return "", err + } + + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, zonename) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(out)), nil +} + +var globalZoneMemoryCapacityMatch = regexp.MustCompile(`memory size: ([\d]+) Megabytes`) + +func globalZoneMemoryCapacity() (uint64, error) { + prtconf, err := exec.LookPath("/usr/sbin/prtconf") + if err != nil { + return 0, err + } + + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, prtconf) + if err != nil { + return 0, err + } + + match := globalZoneMemoryCapacityMatch.FindAllStringSubmatch(string(out), -1) + if len(match) != 1 { + return 0, errors.New("memory size not contained in output of /usr/sbin/prtconf") + } + + totalMB, err := strconv.ParseUint(match[0][1], 10, 64) + if err != nil { + return 0, err + } + + return totalMB * 1024 * 1024, nil +} + +var kstatMatch = regexp.MustCompile(`([^\s]+)[\s]+([^\s]*)`) + +func nonGlobalZoneMemoryCapacity() (uint64, error) { + kstat, err := exec.LookPath("/usr/bin/kstat") + if err != nil { + return 0, err + } + + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, kstat, "-p", "-c", "zone_memory_cap", "memory_cap:*:*:physcap") + if err != nil { + return 0, err + } + + kstats := kstatMatch.FindAllStringSubmatch(string(out), -1) + if len(kstats) != 1 { + return 0, fmt.Errorf("expected 1 kstat, found %d", len(kstats)) + } + + memSizeBytes, err := strconv.ParseUint(kstats[0][2], 10, 64) + if err != nil { + return 0, err + } + + return memSizeBytes, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/mem/mem_windows.go new file mode 100644 index 0000000..cfdf8bd --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/mem_windows.go @@ -0,0 +1,97 @@ +// +build windows + +package mem + +import ( + "context" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" +) + +var ( + procGlobalMemoryStatusEx = common.Modkernel32.NewProc("GlobalMemoryStatusEx") + procGetPerformanceInfo = common.ModPsapi.NewProc("GetPerformanceInfo") +) + +type memoryStatusEx struct { + cbSize uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 // in bytes + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + var memInfo memoryStatusEx + 
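// GlobalMemoryStatusEx requires the structure's size field (cbSize/dwLength)
// to be set before the call, and it returns a nonzero value on success, which
// is why a zero return below is treated as failure.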
memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return nil, windows.GetLastError() + } + + ret := &VirtualMemoryStat{ + Total: memInfo.ullTotalPhys, + Available: memInfo.ullAvailPhys, + UsedPercent: float64(memInfo.dwMemoryLoad), + } + + ret.Used = ret.Total - ret.Available + return ret, nil +} + +type performanceInformation struct { + cb uint32 + commitTotal uint64 + commitLimit uint64 + commitPeak uint64 + physicalTotal uint64 + physicalAvailable uint64 + systemCache uint64 + kernelTotal uint64 + kernelPaged uint64 + kernelNonpaged uint64 + pageSize uint64 + handleCount uint32 + processCount uint32 + threadCount uint32 +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + var perfInfo performanceInformation + perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) + mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) + if mem == 0 { + return nil, windows.GetLastError() + } + tot := perfInfo.commitLimit * perfInfo.pageSize + used := perfInfo.commitTotal * perfInfo.pageSize + free := tot - used + var usedPercent float64 + if tot == 0 { + usedPercent = 0 + } else { + usedPercent = float64(used) / float64(tot) + } + ret := &SwapMemoryStat{ + Total: tot, + Used: used, + Free: free, + UsedPercent: usedPercent, + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go b/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go new file mode 100644 index 0000000..83cb91a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/mem/types_openbsd.go @@ -0,0 +1,34 @@ +// +build ignore + +/* +Input to cgo -godefs. +*/ + +package mem + +/* +#include +#include +#include +#include + +*/ +import "C" + +// Machine characteristics; for internal use. 
+ +const ( + CTLVm = 2 + CTLVfs = 10 + VmUvmexp = 4 // get uvmexp + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfUvmexp = C.sizeof_struct_uvmexp + sizeOfBcachestats = C.sizeof_struct_bcachestats +) + +type Uvmexp C.struct_uvmexp +type Bcachestats C.struct_bcachestats diff --git a/vendor/github.com/shirou/gopsutil/net/net.go b/vendor/github.com/shirou/gopsutil/net/net.go new file mode 100644 index 0000000..fce86c7 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net.go @@ -0,0 +1,259 @@ +package net + +import ( + "context" + "encoding/json" + "fmt" + "net" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +type IOCountersStat struct { + Name string `json:"name"` // interface name + BytesSent uint64 `json:"bytesSent"` // number of bytes sent + BytesRecv uint64 `json:"bytesRecv"` // number of bytes received + PacketsSent uint64 `json:"packetsSent"` // number of packets sent + PacketsRecv uint64 `json:"packetsRecv"` // number of packets received + Errin uint64 `json:"errin"` // total number of errors while receiving + Errout uint64 `json:"errout"` // total number of errors while sending + Dropin uint64 `json:"dropin"` // total number of incoming packets which were dropped + Dropout uint64 `json:"dropout"` // total number of outgoing packets which were dropped (always 0 on OSX and BSD) + Fifoin uint64 `json:"fifoin"` // total number of FIFO buffers errors while receiving + Fifoout uint64 `json:"fifoout"` // total number of FIFO buffers errors while sending + +} + +// Addr is implemented compatibility to psutil +type Addr struct { + IP string `json:"ip"` + Port uint32 `json:"port"` +} + +type ConnectionStat struct { + Fd uint32 `json:"fd"` + Family uint32 `json:"family"` + Type uint32 `json:"type"` + Laddr Addr `json:"localaddr"` + Raddr Addr `json:"remoteaddr"` + Status string `json:"status"` + Uids []int32 `json:"uids"` + Pid int32 `json:"pid"` +} + +// System wide stats about different network protocols +type ProtoCountersStat struct { + Protocol string `json:"protocol"` + Stats map[string]int64 `json:"stats"` +} + +// NetInterfaceAddr is designed for represent interface addresses +type InterfaceAddr struct { + Addr string `json:"addr"` +} + +type InterfaceStat struct { + MTU int `json:"mtu"` // maximum transmission unit + Name string `json:"name"` // e.g., "en0", "lo0", "eth0.100" + HardwareAddr string `json:"hardwareaddr"` // IEEE MAC-48, EUI-48 and EUI-64 form + Flags []string `json:"flags"` // e.g., FlagUp, FlagLoopback, FlagMulticast + Addrs []InterfaceAddr `json:"addrs"` +} + +type FilterStat struct { + ConnTrackCount int64 `json:"conntrackCount"` + ConnTrackMax int64 `json:"conntrackMax"` +} + +var constMap = map[string]int{ + "unix": syscall.AF_UNIX, + "TCP": syscall.SOCK_STREAM, + "UDP": syscall.SOCK_DGRAM, + "IPv4": syscall.AF_INET, + "IPv6": syscall.AF_INET6, +} + +func (n IOCountersStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ConnectionStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ProtoCountersStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (a Addr) String() string { + s, _ := json.Marshal(a) + return string(s) +} + +func (n InterfaceStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n InterfaceAddr) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func Interfaces() ([]InterfaceStat, error) { + return 
InterfacesWithContext(context.Background()) +} + +func InterfacesWithContext(ctx context.Context) ([]InterfaceStat, error) { + is, err := net.Interfaces() + if err != nil { + return nil, err + } + ret := make([]InterfaceStat, 0, len(is)) + for _, ifi := range is { + + var flags []string + if ifi.Flags&net.FlagUp != 0 { + flags = append(flags, "up") + } + if ifi.Flags&net.FlagBroadcast != 0 { + flags = append(flags, "broadcast") + } + if ifi.Flags&net.FlagLoopback != 0 { + flags = append(flags, "loopback") + } + if ifi.Flags&net.FlagPointToPoint != 0 { + flags = append(flags, "pointtopoint") + } + if ifi.Flags&net.FlagMulticast != 0 { + flags = append(flags, "multicast") + } + + r := InterfaceStat{ + Name: ifi.Name, + MTU: ifi.MTU, + HardwareAddr: ifi.HardwareAddr.String(), + Flags: flags, + } + addrs, err := ifi.Addrs() + if err == nil { + r.Addrs = make([]InterfaceAddr, 0, len(addrs)) + for _, addr := range addrs { + r.Addrs = append(r.Addrs, InterfaceAddr{ + Addr: addr.String(), + }) + } + + } + ret = append(ret, r) + } + + return ret, nil +} + +func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) { + r := IOCountersStat{ + Name: "all", + } + for _, nic := range n { + r.BytesRecv += nic.BytesRecv + r.PacketsRecv += nic.PacketsRecv + r.Errin += nic.Errin + r.Dropin += nic.Dropin + r.BytesSent += nic.BytesSent + r.PacketsSent += nic.PacketsSent + r.Errout += nic.Errout + r.Dropout += nic.Dropout + } + + return []IOCountersStat{r}, nil +} + +func parseNetLine(line string) (ConnectionStat, error) { + f := strings.Fields(line) + if len(f) < 8 { + return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) + } + + if len(f) == 8 { + f = append(f, f[7]) + f[7] = "unix" + } + + pid, err := strconv.Atoi(f[1]) + if err != nil { + return ConnectionStat{}, err + } + fd, err := strconv.Atoi(strings.Trim(f[3], "u")) + if err != nil { + return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3]) + } + netFamily, ok := constMap[f[4]] + if !ok { + return ConnectionStat{}, fmt.Errorf("unknown family, %s", f[4]) + } + netType, ok := constMap[f[7]] + if !ok { + return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[7]) + } + + var laddr, raddr Addr + if f[7] == "unix" { + laddr.IP = f[8] + } else { + laddr, raddr, err = parseNetAddr(f[8]) + if err != nil { + return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s", f[8]) + } + } + + n := ConnectionStat{ + Fd: uint32(fd), + Family: uint32(netFamily), + Type: uint32(netType), + Laddr: laddr, + Raddr: raddr, + Pid: int32(pid), + } + if len(f) == 10 { + n.Status = strings.Trim(f[9], "()") + } + + return n, nil +} + +func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { + parse := func(l string) (Addr, error) { + host, port, err := net.SplitHostPort(l) + if err != nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + lport, err := strconv.Atoi(port) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil + } + + addrs := strings.Split(line, "->") + if len(addrs) == 0 { + return laddr, raddr, fmt.Errorf("wrong netaddr, %s", line) + } + laddr, err = parse(addrs[0]) + if len(addrs) == 2 { // remote addr exists + raddr, err = parse(addrs[1]) + if err != nil { + return laddr, raddr, err + } + } + + return laddr, raddr, err +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/net/net_darwin.go new file mode 100644 index 0000000..0d89280 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_darwin.go @@ -0,0 +1,284 @@ +// 
+build darwin + +package net + +import ( + "context" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" +) + +var ( + errNetstatHeader = errors.New("Can't parse header of netstat output") + netstatLinkRegexp = regexp.MustCompile(`^$`) +) + +const endOfLine = "\n" + +func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err error) { + var ( + numericValue uint64 + columns = strings.Fields(line) + ) + + if columns[0] == "Name" { + err = errNetstatHeader + return + } + + // try to extract the numeric value from + if subMatch := netstatLinkRegexp.FindStringSubmatch(columns[2]); len(subMatch) == 2 { + numericValue, err = strconv.ParseUint(subMatch[1], 10, 64) + if err != nil { + return + } + linkIDUint := uint(numericValue) + linkID = &linkIDUint + } + + base := 1 + numberColumns := len(columns) + // sometimes Address is ommitted + if numberColumns < 12 { + base = 0 + } + if numberColumns < 11 || numberColumns > 13 { + err = fmt.Errorf("Line %q do have an invalid number of columns %d", line, numberColumns) + return + } + + parsed := make([]uint64, 0, 7) + vv := []string{ + columns[base+3], // Ipkts == PacketsRecv + columns[base+4], // Ierrs == Errin + columns[base+5], // Ibytes == BytesRecv + columns[base+6], // Opkts == PacketsSent + columns[base+7], // Oerrs == Errout + columns[base+8], // Obytes == BytesSent + } + if len(columns) == 12 { + vv = append(vv, columns[base+10]) + } + + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + if numericValue, err = strconv.ParseUint(target, 10, 64); err != nil { + return + } + parsed = append(parsed, numericValue) + } + + stat = &IOCountersStat{ + Name: strings.Trim(columns[0], "*"), // remove the * that sometimes is on right on interface + PacketsRecv: parsed[0], + Errin: parsed[1], + BytesRecv: parsed[2], + PacketsSent: parsed[3], + Errout: parsed[4], + BytesSent: parsed[5], + } + if len(parsed) == 7 { + stat.Dropout = parsed[6] + } + return +} + +type netstatInterface struct { + linkID *uint + stat *IOCountersStat +} + +func parseNetstatOutput(output string) ([]netstatInterface, error) { + var ( + err error + lines = strings.Split(strings.Trim(output, endOfLine), endOfLine) + ) + + // number of interfaces is number of lines less one for the header + numberInterfaces := len(lines) - 1 + + interfaces := make([]netstatInterface, numberInterfaces) + // no output beside header + if numberInterfaces == 0 { + return interfaces, nil + } + + for index := 0; index < numberInterfaces; index++ { + nsIface := netstatInterface{} + if nsIface.stat, nsIface.linkID, err = parseNetstatLine(lines[index+1]); err != nil { + return nil, err + } + interfaces[index] = nsIface + } + return interfaces, nil +} + +// map that hold the name of a network interface and the number of usage +type mapInterfaceNameUsage map[string]uint + +func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage { + output := make(mapInterfaceNameUsage) + for index := range ifaces { + if ifaces[index].linkID != nil { + ifaceName := ifaces[index].stat.Name + usage, ok := output[ifaceName] + if ok { + output[ifaceName] = usage + 1 + } else { + output[ifaceName] = 1 + } + } + } + return output +} + +func (min mapInterfaceNameUsage) isTruncated() bool { + for _, usage := range min { + if usage > 1 { + return true + } + } + return false +} + +func (min mapInterfaceNameUsage) notTruncated() []string { + output := make([]string, 0) + for ifaceName, usage := range min { + if usage == 1 { + output = 
append(output, ifaceName) + } + } + return output +} + +// example of `netstat -ibdnW` output on yosemite +// Name Mtu Network Address Ipkts Ierrs Ibytes Opkts Oerrs Obytes Coll Drop +// lo0 16384 869107 0 169411755 869107 0 169411755 0 0 +// lo0 16384 ::1/128 ::1 869107 - 169411755 869107 - 169411755 - - +// lo0 16384 127 127.0.0.1 869107 - 169411755 869107 - 169411755 - - +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + var ( + ret []IOCountersStat + retIndex int + ) + + netstat, err := exec.LookPath("/usr/sbin/netstat") + if err != nil { + return nil, err + } + + // try to get all interface metrics, and hope there won't be any truncated + out, err := invoke.CommandWithContext(ctx, netstat, "-ibdnW") + if err != nil { + return nil, err + } + + nsInterfaces, err := parseNetstatOutput(string(out)) + if err != nil { + return nil, err + } + + ifaceUsage := newMapInterfaceNameUsage(nsInterfaces) + notTruncated := ifaceUsage.notTruncated() + ret = make([]IOCountersStat, len(notTruncated)) + + if !ifaceUsage.isTruncated() { + // no truncated interface name, return stats of all interface with + for index := range nsInterfaces { + if nsInterfaces[index].linkID != nil { + ret[retIndex] = *nsInterfaces[index].stat + retIndex++ + } + } + } else { + // duplicated interface, list all interfaces + ifconfig, err := exec.LookPath("/sbin/ifconfig") + if err != nil { + return nil, err + } + if out, err = invoke.CommandWithContext(ctx, ifconfig, "-l"); err != nil { + return nil, err + } + interfaceNames := strings.Fields(strings.TrimRight(string(out), endOfLine)) + + // for each of the interface name, run netstat if we don't have any stats yet + for _, interfaceName := range interfaceNames { + truncated := true + for index := range nsInterfaces { + if nsInterfaces[index].linkID != nil && nsInterfaces[index].stat.Name == interfaceName { + // handle the non truncated name to avoid execute netstat for them again + ret[retIndex] = *nsInterfaces[index].stat + retIndex++ + truncated = false + break + } + } + if truncated { + // run netstat with -I$ifacename + if out, err = invoke.CommandWithContext(ctx, netstat, "-ibdnWI"+interfaceName); err != nil { + return nil, err + } + parsedIfaces, err := parseNetstatOutput(string(out)) + if err != nil { + return nil, err + } + if len(parsedIfaces) == 0 { + // interface had been removed since `ifconfig -l` had been executed + continue + } + for index := range parsedIfaces { + if parsedIfaces[index].linkID != nil { + ret = append(ret, *parsedIfaces[index].stat) + break + } + } + } + } + } + + if pernic == false { + return getIOCountersAll(ret) + } + return ret, nil +} + +// NetIOCountersByFile is an method which is added just a compatibility for linux. 
+func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, errors.New("NetFilterCounters not implemented for darwin") +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Not Implemented for Darwin +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, errors.New("NetProtoCounters not implemented for darwin") +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/net/net_fallback.go new file mode 100644 index 0000000..7c5e632 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_fallback.go @@ -0,0 +1,49 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!windows + +package net + +import ( + "context" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + return []IOCountersStat{}, common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return []FilterStat{}, common.ErrNotImplementedError +} + +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return []ProtoCountersStat{}, common.ErrNotImplementedError +} + +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/net/net_freebsd.go new file mode 100644 index 0000000..ce02415 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_freebsd.go @@ -0,0 +1,125 @@ +// +build freebsd + +package net + +import ( + "context" + "errors" + "os/exec" + "strconv" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) 
([]IOCountersStat, error) { + netstat, err := exec.LookPath("/usr/bin/netstat") + if err != nil { + return nil, err + } + out, err := invoke.CommandWithContext(ctx, netstat, "-ibdnW") + if err != nil { + return nil, err + } + + lines := strings.Split(string(out), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + exists := make([]string, 0, len(ret)) + + for _, line := range lines { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if common.StringsHas(exists, values[0]) { + // skip if already get + continue + } + exists = append(exists, values[0]) + + if len(values) < 12 { + continue + } + base := 1 + // sometimes Address is ommitted + if len(values) < 13 { + base = 0 + } + + parsed := make([]uint64, 0, 8) + vv := []string{ + values[base+3], // PacketsRecv + values[base+4], // Errin + values[base+5], // Dropin + values[base+6], // BytesRecvn + values[base+7], // PacketSent + values[base+8], // Errout + values[base+9], // BytesSent + values[base+11], // Dropout + } + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return nil, err + } + parsed = append(parsed, t) + } + + n := IOCountersStat{ + Name: values[0], + PacketsRecv: parsed[0], + Errin: parsed[1], + Dropin: parsed[2], + BytesRecv: parsed[3], + PacketsSent: parsed[4], + Errout: parsed[5], + BytesSent: parsed[6], + Dropout: parsed[7], + } + ret = append(ret, n) + } + + if pernic == false { + return getIOCountersAll(ret) + } + + return ret, nil +} + +// NetIOCountersByFile is an method which is added just a compatibility for linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, errors.New("NetFilterCounters not implemented for freebsd") +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Not Implemented for FreeBSD +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, errors.New("NetProtoCounters not implemented for freebsd") +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_linux.go b/vendor/github.com/shirou/gopsutil/net/net_linux.go new file mode 100644 index 0000000..71842fb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_linux.go @@ -0,0 +1,793 @@ +// +build linux + +package net + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/internal/common" +) + +// NetIOCounters returnes network I/O statistics for every network +// interface installed on the system. If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. 
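// Illustrative caller-side sketch (the psnet import alias and the printed
// format are assumptions, not part of this package):
//
//	import psnet "github.com/shirou/gopsutil/net"
//
//	stats, err := psnet.IOCounters(true) // one entry per interface
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, s := range stats {
//		fmt.Printf("%s: rx=%d bytes tx=%d bytes\n", s.Name, s.BytesRecv, s.BytesSent)
//	}
//	all, _ := psnet.IOCounters(false) // single aggregated entry named "all"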
+func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + filename := common.HostProc("net/dev") + return IOCountersByFile(pernic, filename) +} + +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + parts := make([]string, 2) + + statlen := len(lines) - 1 + + ret := make([]IOCountersStat, 0, statlen) + + for _, line := range lines[2:] { + separatorPos := strings.LastIndex(line, ":") + if separatorPos == -1 { + continue + } + parts[0] = line[0:separatorPos] + parts[1] = line[separatorPos+1:] + + interfaceName := strings.TrimSpace(parts[0]) + if interfaceName == "" { + continue + } + + fields := strings.Fields(strings.TrimSpace(parts[1])) + bytesRecv, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return ret, err + } + packetsRecv, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return ret, err + } + errIn, err := strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return ret, err + } + dropIn, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return ret, err + } + fifoIn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return ret, err + } + bytesSent, err := strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return ret, err + } + packetsSent, err := strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return ret, err + } + errOut, err := strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return ret, err + } + dropOut, err := strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return ret, err + } + fifoOut, err := strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return ret, err + } + + nic := IOCountersStat{ + Name: interfaceName, + BytesRecv: bytesRecv, + PacketsRecv: packetsRecv, + Errin: errIn, + Dropin: dropIn, + Fifoin: fifoIn, + BytesSent: bytesSent, + PacketsSent: packetsSent, + Errout: errOut, + Dropout: dropOut, + Fifoout: fifoOut, + } + ret = append(ret, nic) + } + + if pernic == false { + return getIOCountersAll(ret) + } + + return ret, nil +} + +var netProtocols = []string{ + "ip", + "icmp", + "icmpmsg", + "tcp", + "udp", + "udplite", +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. 
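// /proc/net/snmp pairs a header line with a data line per protocol, e.g.
// (illustrative, truncated):
//
//	Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens
//	Tcp: 1 200 120000 -1 5231
//
// which is the header/value layout the parsing below relies on.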
+// Available protocols: +// ip,icmp,icmpmsg,tcp,udp,udplite +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + if len(protocols) == 0 { + protocols = netProtocols + } + + stats := make([]ProtoCountersStat, 0, len(protocols)) + protos := make(map[string]bool, len(protocols)) + for _, p := range protocols { + protos[p] = true + } + + filename := common.HostProc("net/snmp") + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + linecount := len(lines) + for i := 0; i < linecount; i++ { + line := lines[i] + r := strings.IndexRune(line, ':') + if r == -1 { + return nil, errors.New(filename + " is not fomatted correctly, expected ':'.") + } + proto := strings.ToLower(line[:r]) + if !protos[proto] { + // skip protocol and data line + i++ + continue + } + + // Read header line + statNames := strings.Split(line[r+2:], " ") + + // Read data line + i++ + statValues := strings.Split(lines[i][r+2:], " ") + if len(statNames) != len(statValues) { + return nil, errors.New(filename + " is not fomatted correctly, expected same number of columns.") + } + stat := ProtoCountersStat{ + Protocol: proto, + Stats: make(map[string]int64, len(statNames)), + } + for j := range statNames { + value, err := strconv.ParseInt(statValues[j], 10, 64) + if err != nil { + return nil, err + } + stat.Stats[statNames[j]] = value + } + stats = append(stats, stat) + } + return stats, nil +} + +// NetFilterCounters returns iptables conntrack statistics +// the currently in use conntrack count and the max. +// If the file does not exist or is invalid it will return nil. 
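// Each of the two proc files read below holds a single integer, e.g.
// (illustrative values):
//
//	/proc/sys/net/netfilter/nf_conntrack_count -> 1045
//	/proc/sys/net/netfilter/nf_conntrack_max   -> 262144
//
// The /proc prefix assumes the default proc root resolved by common.HostProc.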
+func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + countfile := common.HostProc("sys/net/netfilter/nf_conntrack_count") + maxfile := common.HostProc("sys/net/netfilter/nf_conntrack_max") + + count, err := common.ReadInts(countfile) + + if err != nil { + return nil, err + } + stats := make([]FilterStat, 0, 1) + + max, err := common.ReadInts(maxfile) + if err != nil { + return nil, err + } + + payload := FilterStat{ + ConnTrackCount: count[0], + ConnTrackMax: max[0], + } + + stats = append(stats, payload) + return stats, nil +} + +// http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h +var TCPStatuses = map[string]string{ + "01": "ESTABLISHED", + "02": "SYN_SENT", + "03": "SYN_RECV", + "04": "FIN_WAIT1", + "05": "FIN_WAIT2", + "06": "TIME_WAIT", + "07": "CLOSE", + "08": "CLOSE_WAIT", + "09": "LAST_ACK", + "0A": "LISTEN", + "0B": "CLOSING", +} + +type netConnectionKindType struct { + family uint32 + sockType uint32 + filename string +} + +var kindTCP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_STREAM, + filename: "tcp", +} +var kindTCP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_STREAM, + filename: "tcp6", +} +var kindUDP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_DGRAM, + filename: "udp", +} +var kindUDP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_DGRAM, + filename: "udp6", +} +var kindUNIX = netConnectionKindType{ + family: syscall.AF_UNIX, + filename: "unix", +} + +var netConnectionKindMap = map[string][]netConnectionKindType{ + "all": {kindTCP4, kindTCP6, kindUDP4, kindUDP6, kindUNIX}, + "tcp": {kindTCP4, kindTCP6}, + "tcp4": {kindTCP4}, + "tcp6": {kindTCP6}, + "udp": {kindUDP4, kindUDP6}, + "udp4": {kindUDP4}, + "udp6": {kindUDP6}, + "unix": {kindUNIX}, + "inet": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "inet4": {kindTCP4, kindUDP4}, + "inet6": {kindTCP6, kindUDP6}, +} + +type inodeMap struct { + pid int32 + fd uint32 +} + +type connTmp struct { + fd uint32 + family uint32 + sockType uint32 + laddr Addr + raddr Addr + status string + pid int32 + boundPid int32 + path string +} + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPid(kind, 0) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return ConnectionsPidMax(kind, 0, max) +} + +// Return a list of network connections opened by a process. 
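// Illustrative caller-side sketch (import alias, pid value and output format
// are assumptions):
//
//	conns, err := psnet.ConnectionsPid("tcp", 1234)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, c := range conns {
//		fmt.Printf("fd=%d %s:%d -> %s:%d %s\n",
//			c.Fd, c.Laddr.IP, c.Laddr.Port, c.Raddr.IP, c.Raddr.Port, c.Status)
//	}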
+func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + tmap, ok := netConnectionKindMap[kind] + if !ok { + return nil, fmt.Errorf("invalid kind, %s", kind) + } + root := common.HostProc() + var err error + var inodes map[string][]inodeMap + if pid == 0 { + inodes, err = getProcInodesAll(root, 0) + } else { + inodes, err = getProcInodes(root, pid, 0) + if len(inodes) == 0 { + // no connection for the pid + return []ConnectionStat{}, nil + } + } + if err != nil { + return nil, fmt.Errorf("cound not get pid(s), %d: %s", pid, err) + } + return statsFromInodes(root, pid, tmap, inodes) +} + +// Return up to `max` network connections opened by a process. +func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + tmap, ok := netConnectionKindMap[kind] + if !ok { + return nil, fmt.Errorf("invalid kind, %s", kind) + } + root := common.HostProc() + var err error + var inodes map[string][]inodeMap + if pid == 0 { + inodes, err = getProcInodesAll(root, max) + } else { + inodes, err = getProcInodes(root, pid, max) + if len(inodes) == 0 { + // no connection for the pid + return []ConnectionStat{}, nil + } + } + if err != nil { + return nil, fmt.Errorf("cound not get pid(s), %d", pid) + } + return statsFromInodes(root, pid, tmap, inodes) +} + +func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap) ([]ConnectionStat, error) { + dupCheckMap := make(map[string]struct{}) + var ret []ConnectionStat + + var err error + for _, t := range tmap { + var path string + var connKey string + var ls []connTmp + if pid == 0 { + path = fmt.Sprintf("%s/net/%s", root, t.filename) + } else { + path = fmt.Sprintf("%s/%d/net/%s", root, pid, t.filename) + } + switch t.family { + case syscall.AF_INET, syscall.AF_INET6: + ls, err = processInet(path, t, inodes, pid) + case syscall.AF_UNIX: + ls, err = processUnix(path, t, inodes, pid) + } + if err != nil { + return nil, err + } + for _, c := range ls { + // Build TCP key to id the connection uniquely + // socket type, src ip, src port, dst ip, dst port and state should be enough + // to prevent duplications. + connKey = fmt.Sprintf("%d-%s:%d-%s:%d-%s", c.sockType, c.laddr.IP, c.laddr.Port, c.raddr.IP, c.raddr.Port, c.status) + if _, ok := dupCheckMap[connKey]; ok { + continue + } + + conn := ConnectionStat{ + Fd: c.fd, + Family: c.family, + Type: c.sockType, + Laddr: c.laddr, + Raddr: c.raddr, + Status: c.status, + Pid: c.pid, + } + if c.pid == 0 { + conn.Pid = c.boundPid + } else { + conn.Pid = c.pid + } + + // fetch process owner Real, effective, saved set, and filesystem UIDs + proc := process{Pid: conn.Pid} + conn.Uids, _ = proc.getUids() + + ret = append(ret, conn) + dupCheckMap[connKey] = struct{}{} + } + + } + + return ret, nil +} + +// getProcInodes returnes fd of the pid. 
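// Socket descriptors under /proc/<pid>/fd are symlinks of the form
// "socket:[<inode>]", e.g. (illustrative):
//
//	/proc/1234/fd/3 -> socket:[39216]
//
// which is why the code below strips the 8-byte "socket:[" prefix and the
// trailing "]" to recover the inode number.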
+func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) { + ret := make(map[string][]inodeMap) + + dir := fmt.Sprintf("%s/%d/fd", root, pid) + f, err := os.Open(dir) + if err != nil { + return ret, err + } + defer f.Close() + files, err := f.Readdir(max) + if err != nil { + return ret, err + } + for _, fd := range files { + inodePath := fmt.Sprintf("%s/%d/fd/%s", root, pid, fd.Name()) + + inode, err := os.Readlink(inodePath) + if err != nil { + continue + } + if !strings.HasPrefix(inode, "socket:[") { + continue + } + // the process is using a socket + l := len(inode) + inode = inode[8 : l-1] + _, ok := ret[inode] + if !ok { + ret[inode] = make([]inodeMap, 0) + } + fd, err := strconv.Atoi(fd.Name()) + if err != nil { + continue + } + + i := inodeMap{ + pid: pid, + fd: uint32(fd), + } + ret[inode] = append(ret[inode], i) + } + return ret, nil +} + +// Pids retunres all pids. +// Note: this is a copy of process_linux.Pids() +// FIXME: Import process occures import cycle. +// move to common made other platform breaking. Need consider. +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + + d, err := os.Open(common.HostProc()) + if err != nil { + return nil, err + } + defer d.Close() + + fnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, fname := range fnames { + pid, err := strconv.ParseInt(fname, 10, 32) + if err != nil { + // if not numeric name, just skip + continue + } + ret = append(ret, int32(pid)) + } + + return ret, nil +} + +// Note: the following is based off process_linux structs and methods +// we need these to fetch the owner of a process ID +// FIXME: Import process occures import cycle. +// see remarks on pids() +type process struct { + Pid int32 `json:"pid"` + uids []int32 +} + +// Uids returns user ids of the process as a slice of the int +func (p *process) getUids() ([]int32, error) { + err := p.fillFromStatus() + if err != nil { + return []int32{}, err + } + return p.uids, nil +} + +// Get status from /proc/(pid)/status +func (p *process) fillFromStatus() error { + pid := p.Pid + statPath := common.HostProc(strconv.Itoa(int(pid)), "status") + contents, err := ioutil.ReadFile(statPath) + if err != nil { + return err + } + lines := strings.Split(string(contents), "\n") + for _, line := range lines { + tabParts := strings.SplitN(line, "\t", 2) + if len(tabParts) < 2 { + continue + } + value := tabParts[1] + switch strings.TrimRight(tabParts[0], ":") { + case "Uid": + p.uids = make([]int32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.uids = append(p.uids, int32(v)) + } + } + } + return nil +} + +func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { + pids, err := Pids() + if err != nil { + return nil, err + } + ret := make(map[string][]inodeMap) + + for _, pid := range pids { + t, err := getProcInodes(root, pid, max) + if err != nil { + // skip if permission error or no longer exists + if os.IsPermission(err) || os.IsNotExist(err) { + continue + } + return ret, err + } + if len(t) == 0 { + continue + } + // TODO: update ret. 
+ ret = updateMap(ret, t) + } + return ret, nil +} + +// decodeAddress decode addresse represents addr in proc/net/* +// ex: +// "0500000A:0016" -> "10.0.0.5", 22 +// "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53 +func decodeAddress(family uint32, src string) (Addr, error) { + t := strings.Split(src, ":") + if len(t) != 2 { + return Addr{}, fmt.Errorf("does not contain port, %s", src) + } + addr := t[0] + port, err := strconv.ParseInt("0x"+t[1], 0, 64) + if err != nil { + return Addr{}, fmt.Errorf("invalid port, %s", src) + } + decoded, err := hex.DecodeString(addr) + if err != nil { + return Addr{}, fmt.Errorf("decode error, %s", err) + } + var ip net.IP + // Assumes this is little_endian + if family == syscall.AF_INET { + ip = net.IP(Reverse(decoded)) + } else { // IPv6 + ip, err = parseIPv6HexString(decoded) + if err != nil { + return Addr{}, err + } + } + return Addr{ + IP: ip.String(), + Port: uint32(port), + }, nil +} + +// Reverse reverses array of bytes. +func Reverse(s []byte) []byte { + return ReverseWithContext(context.Background(), s) +} + +func ReverseWithContext(ctx context.Context, s []byte) []byte { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } + return s +} + +// parseIPv6HexString parse array of bytes to IPv6 string +func parseIPv6HexString(src []byte) (net.IP, error) { + if len(src) != 16 { + return nil, fmt.Errorf("invalid IPv6 string") + } + + buf := make([]byte, 0, 16) + for i := 0; i < len(src); i += 4 { + r := Reverse(src[i : i+4]) + buf = append(buf, r...) + } + return net.IP(buf), nil +} + +func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { + + if strings.HasSuffix(file, "6") && !common.PathExists(file) { + // IPv6 not supported, return empty. + return []connTmp{}, nil + } + + // Read the contents of the /proc file with a single read sys call. + // This minimizes duplicates in the returned connections + // For more info: + // https://github.com/shirou/gopsutil/pull/361 + contents, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + + lines := bytes.Split(contents, []byte("\n")) + + var ret []connTmp + // skip first line + for _, line := range lines[1:] { + l := strings.Fields(string(line)) + if len(l) < 10 { + continue + } + laddr := l[1] + raddr := l[2] + status := l[3] + inode := l[9] + pid := int32(0) + fd := uint32(0) + i, exists := inodes[inode] + if exists { + pid = i[0].pid + fd = i[0].fd + } + if filterPid > 0 && filterPid != pid { + continue + } + if kind.sockType == syscall.SOCK_STREAM { + status = TCPStatuses[status] + } else { + status = "NONE" + } + la, err := decodeAddress(kind.family, laddr) + if err != nil { + continue + } + ra, err := decodeAddress(kind.family, raddr) + if err != nil { + continue + } + + ret = append(ret, connTmp{ + fd: fd, + family: kind.family, + sockType: kind.sockType, + laddr: la, + raddr: ra, + status: status, + pid: pid, + }) + } + + return ret, nil +} + +func processUnix(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { + // Read the contents of the /proc file with a single read sys call. 
+ // This minimizes duplicates in the returned connections + // For more info: + // https://github.com/shirou/gopsutil/pull/361 + contents, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + + lines := bytes.Split(contents, []byte("\n")) + + var ret []connTmp + // skip first line + for _, line := range lines[1:] { + tokens := strings.Fields(string(line)) + if len(tokens) < 6 { + continue + } + st, err := strconv.Atoi(tokens[4]) + if err != nil { + return nil, err + } + + inode := tokens[6] + + var pairs []inodeMap + pairs, exists := inodes[inode] + if !exists { + pairs = []inodeMap{ + {}, + } + } + for _, pair := range pairs { + if filterPid > 0 && filterPid != pair.pid { + continue + } + var path string + if len(tokens) == 8 { + path = tokens[len(tokens)-1] + } + ret = append(ret, connTmp{ + fd: pair.fd, + family: kind.family, + sockType: uint32(st), + laddr: Addr{ + IP: path, + }, + pid: pair.pid, + status: "NONE", + path: path, + }) + } + } + + return ret, nil +} + +func updateMap(src map[string][]inodeMap, add map[string][]inodeMap) map[string][]inodeMap { + for key, value := range add { + a, exists := src[key] + if !exists { + src[key] = value + continue + } + src[key] = append(a, value...) + } + return src +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/net/net_openbsd.go new file mode 100644 index 0000000..3e74e8f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_openbsd.go @@ -0,0 +1,312 @@ +// +build openbsd + +package net + +import ( + "context" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/internal/common" +) + +var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) + +func ParseNetstat(output string, mode string, + iocs map[string]IOCountersStat) error { + lines := strings.Split(output, "\n") + + exists := make([]string, 0, len(lines)-1) + + columns := 6 + if mode == "ind" { + columns = 10 + } + for _, line := range lines { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if common.StringsHas(exists, values[0]) { + // skip if already get + continue + } + + if len(values) < columns { + continue + } + base := 1 + // sometimes Address is ommitted + if len(values) < columns { + base = 0 + } + + parsed := make([]uint64, 0, 8) + var vv []string + if mode == "inb" { + vv = []string{ + values[base+3], // BytesRecv + values[base+4], // BytesSent + } + } else { + vv = []string{ + values[base+3], // Ipkts + values[base+4], // Ierrs + values[base+5], // Opkts + values[base+6], // Oerrs + values[base+8], // Drops + } + } + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return err + } + parsed = append(parsed, t) + } + exists = append(exists, values[0]) + + n, present := iocs[values[0]] + if !present { + n = IOCountersStat{Name: values[0]} + } + if mode == "inb" { + n.BytesRecv = parsed[0] + n.BytesSent = parsed[1] + } else { + n.PacketsRecv = parsed[0] + n.Errin = parsed[1] + n.PacketsSent = parsed[2] + n.Errout = parsed[3] + n.Dropin = parsed[4] + n.Dropout = parsed[4] + } + + iocs[n.Name] = n + } + return nil +} + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + netstat, err := exec.LookPath("netstat") + if err != nil { 
+ return nil, err + } + out, err := invoke.CommandWithContext(ctx, netstat, "-inb") + if err != nil { + return nil, err + } + out2, err := invoke.CommandWithContext(ctx, netstat, "-ind") + if err != nil { + return nil, err + } + iocs := make(map[string]IOCountersStat) + + lines := strings.Split(string(out), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + + err = ParseNetstat(string(out), "inb", iocs) + if err != nil { + return nil, err + } + err = ParseNetstat(string(out2), "ind", iocs) + if err != nil { + return nil, err + } + + for _, ioc := range iocs { + ret = append(ret, ioc) + } + + if pernic == false { + return getIOCountersAll(ret) + } + + return ret, nil +} + +// NetIOCountersByFile is an method which is added just a compatibility for linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, errors.New("NetFilterCounters not implemented for openbsd") +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Not Implemented for OpenBSD +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, errors.New("NetProtoCounters not implemented for openbsd") +} + +func parseNetstatLine(line string) (ConnectionStat, error) { + f := strings.Fields(line) + if len(f) < 5 { + return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) + } + + var netType, netFamily uint32 + switch f[0] { + case "tcp": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET + case "udp": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET + case "tcp6": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET6 + case "udp6": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET6 + default: + return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[0]) + } + + laddr, raddr, err := parseNetstatAddr(f[3], f[4], netFamily) + if err != nil { + return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s %s", f[3], f[4]) + } + + n := ConnectionStat{ + Fd: uint32(0), // not supported + Family: uint32(netFamily), + Type: uint32(netType), + Laddr: laddr, + Raddr: raddr, + Pid: int32(0), // not supported + } + if len(f) == 6 { + n.Status = f[5] + } + + return n, nil +} + +func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { + parse := func(l string) (Addr, error) { + matches := portMatch.FindStringSubmatch(l) + if matches == nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + host := matches[1] + port := matches[2] + if host == "*" { + switch family { + case syscall.AF_INET: + host = "0.0.0.0" + case syscall.AF_INET6: + host = "::" + default: + return Addr{}, fmt.Errorf("unknown family, %d", family) + } + } + lport, err := strconv.Atoi(port) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil + } + + 
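	// The parse closure above splits OpenBSD netstat addresses such as
	// "10.0.0.5.22" or "*.22" on the last dot: everything before it is the host
	// (with "*" rewritten to the family's unspecified address, "0.0.0.0" or
	// "::"), and the trailing digits are the port. For example, parse("*.22")
	// under AF_INET yields Addr{IP: "0.0.0.0", Port: 22}.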
laddr, err = parse(local) + if remote != "*.*" { // remote addr exists + raddr, err = parse(remote) + if err != nil { + return laddr, raddr, err + } + } + + return laddr, raddr, err +} + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + var ret []ConnectionStat + + args := []string{"-na"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + fallthrough + case "all": + fallthrough + case "inet": + // nothing to add + case "inet4": + args = append(args, "-finet") + case "inet6": + args = append(args, "-finet6") + case "tcp": + args = append(args, "-ptcp") + case "tcp4": + args = append(args, "-ptcp", "-finet") + case "tcp6": + args = append(args, "-ptcp", "-finet6") + case "udp": + args = append(args, "-pudp") + case "udp4": + args = append(args, "-pudp", "-finet") + case "udp6": + args = append(args, "-pudp", "-finet6") + case "unix": + return ret, common.ErrNotImplementedError + } + + netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + out, err := invoke.CommandWithContext(ctx, netstat, args...) + + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + for _, line := range lines { + if !(strings.HasPrefix(line, "tcp") || strings.HasPrefix(line, "udp")) { + continue + } + n, err := parseNetstatLine(line) + if err != nil { + continue + } + + ret = append(ret, n) + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_unix.go b/vendor/github.com/shirou/gopsutil/net/net_unix.go new file mode 100644 index 0000000..4451b54 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_unix.go @@ -0,0 +1,96 @@ +// +build freebsd darwin + +package net + +import ( + "context" + "strings" + + "github.com/shirou/gopsutil/internal/common" +) + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPid(kind, 0) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +// Return a list of network connections opened by a process. 
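// Sketch of a typical caller of this API: list the TCP sockets of the current
// process via ConnectionsPid, which is defined just below for this build tag.
// Error handling is kept minimal for brevity; the import path matches the
// vendored package.
package main

import (
	"fmt"
	"os"

	"github.com/shirou/gopsutil/net"
)

func main() {
	conns, err := net.ConnectionsPid("tcp", int32(os.Getpid()))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, c := range conns {
		fmt.Printf("%s:%d -> %s:%d %s\n",
			c.Laddr.IP, c.Laddr.Port, c.Raddr.IP, c.Raddr.Port, c.Status)
	}
}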
+func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + var ret []ConnectionStat + + args := []string{"-i"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + fallthrough + case "all": + fallthrough + case "inet": + args = append(args, "tcp", "-i", "udp") + case "inet4": + args = append(args, "4") + case "inet6": + args = append(args, "6") + case "tcp": + args = append(args, "tcp") + case "tcp4": + args = append(args, "4tcp") + case "tcp6": + args = append(args, "6tcp") + case "udp": + args = append(args, "udp") + case "udp4": + args = append(args, "6udp") + case "udp6": + args = append(args, "6udp") + case "unix": + args = []string{"-U"} + } + + r, err := common.CallLsofWithContext(ctx, invoke, pid, args...) + if err != nil { + return nil, err + } + for _, rr := range r { + if strings.HasPrefix(rr, "COMMAND") { + continue + } + n, err := parseNetLine(rr) + if err != nil { + + continue + } + + ret = append(ret, n) + } + + return ret, nil +} + +// Return up to `max` network connections opened by a process. +func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/net/net_windows.go b/vendor/github.com/shirou/gopsutil/net/net_windows.go new file mode 100644 index 0000000..61eb6ec --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/net/net_windows.go @@ -0,0 +1,645 @@ +// +build windows + +package net + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "syscall" + "unsafe" + + "github.com/shirou/gopsutil/internal/common" + "golang.org/x/sys/windows" +) + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + procGetExtendedTCPTable = modiphlpapi.NewProc("GetExtendedTcpTable") + procGetExtendedUDPTable = modiphlpapi.NewProc("GetExtendedUdpTable") +) + +const ( + TCPTableBasicListener = iota + TCPTableBasicConnections + TCPTableBasicAll + TCPTableOwnerPIDListener + TCPTableOwnerPIDConnections + TCPTableOwnerPIDAll + TCPTableOwnerModuleListener + TCPTableOwnerModuleConnections + TCPTableOwnerModuleAll +) + +type netConnectionKindType struct { + family uint32 + sockType uint32 + filename string +} + +var kindTCP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_STREAM, + filename: "tcp", +} +var kindTCP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_STREAM, + filename: "tcp6", +} +var kindUDP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_DGRAM, + filename: "udp", +} +var kindUDP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_DGRAM, + filename: "udp6", +} + +var netConnectionKindMap = map[string][]netConnectionKindType{ + "all": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "tcp": {kindTCP4, kindTCP6}, + "tcp4": {kindTCP4}, + "tcp6": {kindTCP6}, + "udp": {kindUDP4, kindUDP6}, + "udp4": {kindUDP4}, + "udp6": {kindUDP6}, + "inet": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "inet4": {kindTCP4, kindUDP4}, + "inet6": {kindTCP6, kindUDP6}, +} + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return 
IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + ifs, err := net.Interfaces() + if err != nil { + return nil, err + } + var ret []IOCountersStat + + for _, ifi := range ifs { + c := IOCountersStat{ + Name: ifi.Name, + } + + row := windows.MibIfRow{Index: uint32(ifi.Index)} + e := windows.GetIfEntry(&row) + if e != nil { + return nil, os.NewSyscallError("GetIfEntry", e) + } + c.BytesSent = uint64(row.OutOctets) + c.BytesRecv = uint64(row.InOctets) + c.PacketsSent = uint64(row.OutUcastPkts) + c.PacketsRecv = uint64(row.InUcastPkts) + c.Errin = uint64(row.InErrors) + c.Errout = uint64(row.OutErrors) + c.Dropin = uint64(row.InDiscards) + c.Dropout = uint64(row.OutDiscards) + + ret = append(ret, c) + } + + if pernic == false { + return getIOCountersAll(ret) + } + return ret, nil +} + +// NetIOCountersByFile is an method which is added just a compatibility for linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +// Return a list of network connections +// Available kind: +// reference to netConnectionKindMap +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(ctx, kind, 0) +} + +// ConnectionsPid Return a list of network connections opened by a process +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + tmap, ok := netConnectionKindMap[kind] + if !ok { + return nil, fmt.Errorf("invalid kind, %s", kind) + } + return getProcInet(tmap, pid) +} + +func getProcInet(kinds []netConnectionKindType, pid int32) ([]ConnectionStat, error) { + stats := make([]ConnectionStat, 0) + + for _, kind := range kinds { + s, err := getNetStatWithKind(kind) + if err != nil { + continue + } + + if pid == 0 { + stats = append(stats, s...) + } else { + for _, ns := range s { + if ns.Pid != pid { + continue + } + stats = append(stats, ns) + } + } + } + + return stats, nil +} + +func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error) { + if kindType.filename == "" { + return nil, fmt.Errorf("kind filename must be required") + } + + switch kindType.filename { + case kindTCP4.filename: + return getTCPConnections(kindTCP4.family) + case kindTCP6.filename: + return getTCPConnections(kindTCP6.family) + case kindUDP4.filename: + return getUDPConnections(kindUDP4.family) + case kindUDP6.filename: + return getUDPConnections(kindUDP6.family) + } + + return nil, fmt.Errorf("invalid kind filename, %s", kindType.filename) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. 
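// getTCPConnections and getUDPConnections below both follow the usual Windows
// pattern around GetExtendedTcpTable/GetExtendedUdpTable: call once to learn
// the required buffer size, then retry with an allocated buffer. A simplified
// sketch of that loop follows; queryTable is a hypothetical stand-in for the
// real syscall wrapper, and the windows package is the golang.org/x/sys/windows
// import already used by this file.
func fillTable(queryTable func(buf []byte, size *uint32) error) ([]byte, error) {
	var buf []byte
	var size uint32
	for {
		err := queryTable(buf, &size)
		if err == nil {
			return buf, nil // size now describes how much of buf is valid
		}
		if err != windows.ERROR_INSUFFICIENT_BUFFER {
			return nil, err
		}
		// The API reported the size it needs; allocate and call again.
		buf = make([]byte, size)
	}
}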
+func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, errors.New("NetFilterCounters not implemented for windows") +} + +// NetProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Not Implemented for Windows +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, errors.New("NetProtoCounters not implemented for windows") +} + +func getTableUintptr(family uint32, buf []byte) uintptr { + var ( + pmibTCPTable pmibTCPTableOwnerPidAll + pmibTCP6Table pmibTCP6TableOwnerPidAll + + p uintptr + ) + switch family { + case kindTCP4.family: + if len(buf) > 0 { + pmibTCPTable = (*mibTCPTableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } else { + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } + case kindTCP6.family: + if len(buf) > 0 { + pmibTCP6Table = (*mibTCP6TableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } else { + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } + } + return p +} + +func getTableInfo(filename string, table interface{}) (index, step, length int) { + switch filename { + case kindTCP4.filename: + index = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).Table)) + length = int(table.(pmibTCPTableOwnerPidAll).DwNumEntries) + case kindTCP6.filename: + index = int(unsafe.Sizeof(table.(pmibTCP6TableOwnerPidAll).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibTCP6TableOwnerPidAll).Table)) + length = int(table.(pmibTCP6TableOwnerPidAll).DwNumEntries) + case kindUDP4.filename: + index = int(unsafe.Sizeof(table.(pmibUDPTableOwnerPid).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibUDPTableOwnerPid).Table)) + length = int(table.(pmibUDPTableOwnerPid).DwNumEntries) + case kindUDP6.filename: + index = int(unsafe.Sizeof(table.(pmibUDP6TableOwnerPid).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibUDP6TableOwnerPid).Table)) + length = int(table.(pmibUDP6TableOwnerPid).DwNumEntries) + } + + return +} + +func getTCPConnections(family uint32) ([]ConnectionStat, error) { + var ( + p uintptr + buf []byte + size uint32 + + pmibTCPTable pmibTCPTableOwnerPidAll + pmibTCP6Table pmibTCP6TableOwnerPidAll + ) + + if family == 0 { + return nil, fmt.Errorf("faimly must be required") + } + + for { + switch family { + case kindTCP4.family: + if len(buf) > 0 { + pmibTCPTable = (*mibTCPTableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } else { + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } + case kindTCP6.family: + if len(buf) > 0 { + pmibTCP6Table = (*mibTCP6TableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } else { + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } + } + + err := getExtendedTcpTable(p, + 
&size, + true, + family, + tcpTableOwnerPidAll, + 0) + if err == nil { + break + } + if err != windows.ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + buf = make([]byte, size) + } + + var ( + index, step int + length int + ) + + stats := make([]ConnectionStat, 0) + switch family { + case kindTCP4.family: + index, step, length = getTableInfo(kindTCP4.filename, pmibTCPTable) + case kindTCP6.family: + index, step, length = getTableInfo(kindTCP6.filename, pmibTCP6Table) + } + + if length == 0 { + return nil, nil + } + + for i := 0; i < length; i++ { + switch family { + case kindTCP4.family: + mibs := (*mibTCPRowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + case kindTCP6.family: + mibs := (*mibTCP6RowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + } + + index += step + } + return stats, nil +} + +func getUDPConnections(family uint32) ([]ConnectionStat, error) { + var ( + p uintptr + buf []byte + size uint32 + + pmibUDPTable pmibUDPTableOwnerPid + pmibUDP6Table pmibUDP6TableOwnerPid + ) + + if family == 0 { + return nil, fmt.Errorf("faimly must be required") + } + + for { + switch family { + case kindUDP4.family: + if len(buf) > 0 { + pmibUDPTable = (*mibUDPTableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibUDPTable)) + } else { + p = uintptr(unsafe.Pointer(pmibUDPTable)) + } + case kindUDP6.family: + if len(buf) > 0 { + pmibUDP6Table = (*mibUDP6TableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibUDP6Table)) + } else { + p = uintptr(unsafe.Pointer(pmibUDP6Table)) + } + } + + err := getExtendedUdpTable( + p, + &size, + true, + family, + udpTableOwnerPid, + 0, + ) + if err == nil { + break + } + if err != windows.ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + buf = make([]byte, size) + } + + var ( + index, step, length int + ) + + stats := make([]ConnectionStat, 0) + switch family { + case kindUDP4.family: + index, step, length = getTableInfo(kindUDP4.filename, pmibUDPTable) + case kindUDP6.family: + index, step, length = getTableInfo(kindUDP6.filename, pmibUDP6Table) + } + + if length == 0 { + return nil, nil + } + + for i := 0; i < length; i++ { + switch family { + case kindUDP4.family: + mibs := (*mibUDPRowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + case kindUDP4.family: + mibs := (*mibUDP6RowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + } + + index += step + } + return stats, nil +} + +// tcpStatuses https://msdn.microsoft.com/en-us/library/windows/desktop/bb485761(v=vs.85).aspx +var tcpStatuses = map[mibTCPState]string{ + 1: "CLOSED", + 2: "LISTEN", + 3: "SYN_SENT", + 4: "SYN_RECEIVED", + 5: "ESTABLISHED", + 6: "FIN_WAIT_1", + 7: "FIN_WAIT_2", + 8: "CLOSE_WAIT", + 9: "CLOSING", + 10: "LAST_ACK", + 11: "TIME_WAIT", + 12: "DELETE", +} + +func getExtendedTcpTable(pTcpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass tcpTableClass, reserved uint32) (errcode error) { + r1, _, _ := syscall.Syscall6(procGetExtendedTCPTable.Addr(), 6, pTcpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) + if r1 != 0 { + errcode = syscall.Errno(r1) + } + return +} + +func getExtendedUdpTable(pUdpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass udpTableClass, reserved uint32) (errcode error) { + r1, _, _ := 
syscall.Syscall6(procGetExtendedUDPTable.Addr(), 6, pUdpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) + if r1 != 0 { + errcode = syscall.Errno(r1) + } + return +} + +func getUintptrFromBool(b bool) uintptr { + if b { + return 1 + } + return 0 +} + +const anySize = 1 + +// type MIB_TCP_STATE int32 +type mibTCPState int32 + +type tcpTableClass int32 + +const ( + tcpTableBasicListener tcpTableClass = iota + tcpTableBasicConnections + tcpTableBasicAll + tcpTableOwnerPidListener + tcpTableOwnerPidConnections + tcpTableOwnerPidAll + tcpTableOwnerModuleListener + tcpTableOwnerModuleConnections + tcpTableOwnerModuleAll +) + +type udpTableClass int32 + +const ( + udpTableBasic udpTableClass = iota + udpTableOwnerPid + udpTableOwnerModule +) + +// TCP + +type mibTCPRowOwnerPid struct { + DwState uint32 + DwLocalAddr uint32 + DwLocalPort uint32 + DwRemoteAddr uint32 + DwRemotePort uint32 + DwOwningPid uint32 +} + +func (m *mibTCPRowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindTCP4.family, + Type: kindTCP4.sockType, + Laddr: Addr{ + IP: parseIPv4HexString(m.DwLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Raddr: Addr{ + IP: parseIPv4HexString(m.DwRemoteAddr), + Port: uint32(decodePort(m.DwRemotePort)), + }, + Pid: int32(m.DwOwningPid), + Status: tcpStatuses[mibTCPState(m.DwState)], + } + + return ns +} + +type mibTCPTableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibTCPRowOwnerPid +} + +type mibTCP6RowOwnerPid struct { + UcLocalAddr [16]byte + DwLocalScopeId uint32 + DwLocalPort uint32 + UcRemoteAddr [16]byte + DwRemoteScopeId uint32 + DwRemotePort uint32 + DwState uint32 + DwOwningPid uint32 +} + +func (m *mibTCP6RowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindTCP6.family, + Type: kindTCP6.sockType, + Laddr: Addr{ + IP: parseIPv6HexString(m.UcLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Raddr: Addr{ + IP: parseIPv6HexString(m.UcRemoteAddr), + Port: uint32(decodePort(m.DwRemotePort)), + }, + Pid: int32(m.DwOwningPid), + Status: tcpStatuses[mibTCPState(m.DwState)], + } + + return ns +} + +type mibTCP6TableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibTCP6RowOwnerPid +} + +type pmibTCPTableOwnerPidAll *mibTCPTableOwnerPid +type pmibTCP6TableOwnerPidAll *mibTCP6TableOwnerPid + +// UDP + +type mibUDPRowOwnerPid struct { + DwLocalAddr uint32 + DwLocalPort uint32 + DwOwningPid uint32 +} + +func (m *mibUDPRowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindUDP4.family, + Type: kindUDP4.sockType, + Laddr: Addr{ + IP: parseIPv4HexString(m.DwLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Pid: int32(m.DwOwningPid), + } + + return ns +} + +type mibUDPTableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibUDPRowOwnerPid +} + +type mibUDP6RowOwnerPid struct { + UcLocalAddr [16]byte + DwLocalScopeId uint32 + DwLocalPort uint32 + DwOwningPid uint32 +} + +func (m *mibUDP6RowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindUDP6.family, + Type: kindUDP6.sockType, + Laddr: Addr{ + IP: parseIPv6HexString(m.UcLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Pid: int32(m.DwOwningPid), + } + + return ns +} + +type mibUDP6TableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibUDP6RowOwnerPid +} + +type pmibUDPTableOwnerPid *mibUDPTableOwnerPid +type pmibUDP6TableOwnerPid 
*mibUDP6TableOwnerPid + +func decodePort(port uint32) uint16 { + return syscall.Ntohs(uint16(port)) +} + +func parseIPv4HexString(addr uint32) string { + return fmt.Sprintf("%d.%d.%d.%d", addr&255, addr>>8&255, addr>>16&255, addr>>24&255) +} + +func parseIPv6HexString(addr [16]byte) string { + var ret [16]byte + for i := 0; i < 16; i++ { + ret[i] = uint8(addr[i]) + } + + // convert []byte to net.IP + ip := net.IP(ret[:]) + return ip.String() +} diff --git a/vendor/github.com/shirou/gopsutil/process/process.go b/vendor/github.com/shirou/gopsutil/process/process.go new file mode 100644 index 0000000..a9d10e0 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process.go @@ -0,0 +1,262 @@ +package process + +import ( + "context" + "encoding/json" + "errors" + "runtime" + "time" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/mem" +) + +var ( + invoke common.Invoker = common.Invoke{} + ErrorNoChildren = errors.New("process does not have children") +) + +type Process struct { + Pid int32 `json:"pid"` + name string + status string + parent int32 + numCtxSwitches *NumCtxSwitchesStat + uids []int32 + gids []int32 + numThreads int32 + memInfo *MemoryInfoStat + sigInfo *SignalInfoStat + + lastCPUTimes *cpu.TimesStat + lastCPUTime time.Time + + tgid int32 +} + +type OpenFilesStat struct { + Path string `json:"path"` + Fd uint64 `json:"fd"` +} + +type MemoryInfoStat struct { + RSS uint64 `json:"rss"` // bytes + VMS uint64 `json:"vms"` // bytes + HWM uint64 `json:"hwm"` // bytes + Data uint64 `json:"data"` // bytes + Stack uint64 `json:"stack"` // bytes + Locked uint64 `json:"locked"` // bytes + Swap uint64 `json:"swap"` // bytes +} + +type SignalInfoStat struct { + PendingProcess uint64 `json:"pending_process"` + PendingThread uint64 `json:"pending_thread"` + Blocked uint64 `json:"blocked"` + Ignored uint64 `json:"ignored"` + Caught uint64 `json:"caught"` +} + +type RlimitStat struct { + Resource int32 `json:"resource"` + Soft int32 `json:"soft"` //TODO too small. needs to be uint64 + Hard int32 `json:"hard"` //TODO too small. 
needs to be uint64 + Used uint64 `json:"used"` +} + +type IOCountersStat struct { + ReadCount uint64 `json:"readCount"` + WriteCount uint64 `json:"writeCount"` + ReadBytes uint64 `json:"readBytes"` + WriteBytes uint64 `json:"writeBytes"` +} + +type NumCtxSwitchesStat struct { + Voluntary int64 `json:"voluntary"` + Involuntary int64 `json:"involuntary"` +} + +type PageFaultsStat struct { + MinorFaults uint64 `json:"minorFaults"` + MajorFaults uint64 `json:"majorFaults"` + ChildMinorFaults uint64 `json:"childMinorFaults"` + ChildMajorFaults uint64 `json:"childMajorFaults"` +} + +// Resource limit constants are from /usr/include/x86_64-linux-gnu/bits/resource.h +// from libc6-dev package in Ubuntu 16.10 +const ( + RLIMIT_CPU int32 = 0 + RLIMIT_FSIZE int32 = 1 + RLIMIT_DATA int32 = 2 + RLIMIT_STACK int32 = 3 + RLIMIT_CORE int32 = 4 + RLIMIT_RSS int32 = 5 + RLIMIT_NPROC int32 = 6 + RLIMIT_NOFILE int32 = 7 + RLIMIT_MEMLOCK int32 = 8 + RLIMIT_AS int32 = 9 + RLIMIT_LOCKS int32 = 10 + RLIMIT_SIGPENDING int32 = 11 + RLIMIT_MSGQUEUE int32 = 12 + RLIMIT_NICE int32 = 13 + RLIMIT_RTPRIO int32 = 14 + RLIMIT_RTTIME int32 = 15 +) + +func (p Process) String() string { + s, _ := json.Marshal(p) + return string(s) +} + +func (o OpenFilesStat) String() string { + s, _ := json.Marshal(o) + return string(s) +} + +func (m MemoryInfoStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +func (r RlimitStat) String() string { + s, _ := json.Marshal(r) + return string(s) +} + +func (i IOCountersStat) String() string { + s, _ := json.Marshal(i) + return string(s) +} + +func (p NumCtxSwitchesStat) String() string { + s, _ := json.Marshal(p) + return string(s) +} + +func PidExists(pid int32) (bool, error) { + return PidExistsWithContext(context.Background(), pid) +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + pids, err := Pids() + if err != nil { + return false, err + } + + for _, i := range pids { + if i == pid { + return true, err + } + } + + return false, err +} + +// Background returns true if the process is in background, false otherwise. +func (p *Process) Background() (bool, error) { + return p.BackgroundWithContext(context.Background()) +} + +func (p *Process) BackgroundWithContext(ctx context.Context) (bool, error) { + fg, err := p.ForegroundWithContext(ctx) + if err != nil { + return false, err + } + return !fg, err +} + +// If interval is 0, return difference from last call(non-blocking). +// If interval > 0, wait interval sec and return diffrence between start and end. 
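// Worked example for the calculation done by Percent/calculatePercent below
// (numbers are illustrative): with interval = 1s on a 4-CPU machine,
// delta = 1 * 4 = 4. A process that accumulated 0.5s of CPU time in that
// window has delta_proc = 0.5, so the result is (0.5 / 4) * 100 * 4 = 50.
// A single fully-busy core therefore reads as 100, and the theoretical
// maximum is 100 * NumCPU.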
+func (p *Process) Percent(interval time.Duration) (float64, error) { + return p.PercentWithContext(context.Background(), interval) +} + +func (p *Process) PercentWithContext(ctx context.Context, interval time.Duration) (float64, error) { + cpuTimes, err := p.Times() + if err != nil { + return 0, err + } + now := time.Now() + + if interval > 0 { + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + time.Sleep(interval) + cpuTimes, err = p.Times() + now = time.Now() + if err != nil { + return 0, err + } + } else { + if p.lastCPUTimes == nil { + // invoked first time + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + return 0, nil + } + } + + numcpu := runtime.NumCPU() + delta := (now.Sub(p.lastCPUTime).Seconds()) * float64(numcpu) + ret := calculatePercent(p.lastCPUTimes, cpuTimes, delta, numcpu) + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + return ret, nil +} + +func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 { + if delta == 0 { + return 0 + } + delta_proc := t2.Total() - t1.Total() + overall_percent := ((delta_proc / delta) * 100) * float64(numcpu) + return overall_percent +} + +// MemoryPercent returns how many percent of the total RAM this process uses +func (p *Process) MemoryPercent() (float32, error) { + return p.MemoryPercentWithContext(context.Background()) +} + +func (p *Process) MemoryPercentWithContext(ctx context.Context) (float32, error) { + machineMemory, err := mem.VirtualMemory() + if err != nil { + return 0, err + } + total := machineMemory.Total + + processMemory, err := p.MemoryInfo() + if err != nil { + return 0, err + } + used := processMemory.RSS + + return (100 * float32(used) / float32(total)), nil +} + +// CPU_Percent returns how many percent of the CPU time this process uses +func (p *Process) CPUPercent() (float64, error) { + return p.CPUPercentWithContext(context.Background()) +} + +func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { + crt_time, err := p.CreateTime() + if err != nil { + return 0, err + } + + cput, err := p.Times() + if err != nil { + return 0, err + } + + created := time.Unix(0, crt_time*int64(time.Millisecond)) + totalTime := time.Since(created).Seconds() + if totalTime <= 0 { + return 0, nil + } + + return 100 * cput.Total() / totalTime, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/process/process_darwin.go new file mode 100644 index 0000000..0e83030 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_darwin.go @@ -0,0 +1,662 @@ +// +build darwin + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + "unsafe" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/net" + "golang.org/x/sys/unix" +) + +// copied from sys/sysctl.h +const ( + CTLKern = 1 // "high kernel": proc, limits + KernProc = 14 // struct: process entries + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcAll = 0 // everything + KernProcPathname = 12 // path to executable +) + +const ( + ClockTicks = 100 // C.sysconf(C._SC_CLK_TCK) +) + +type _Ctype_struct___0 struct { + Pad uint64 +} + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { +} + +type MemoryMapsStat struct { +} + +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + var ret 
[]int32 + + pids, err := callPsWithContext(ctx, "pid", 0, false) + if err != nil { + return ret, err + } + + for _, pid := range pids { + v, err := strconv.Atoi(pid[0]) + if err != nil { + return ret, err + } + ret = append(ret, int32(v)) + } + + return ret, nil +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + r, err := callPsWithContext(ctx, "ppid", p.Pid, false) + if err != nil { + return 0, err + } + + v, err := strconv.Atoi(r[0][0]) + if err != nil { + return 0, err + } + + return int32(v), err +} +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + return common.IntToString(k.Proc.P_comm[:]), nil +} +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + lsof_bin, err := exec.LookPath("lsof") + if err != nil { + return "", err + } + + awk_bin, err := exec.LookPath("awk") + if err != nil { + return "", err + } + + sed_bin, err := exec.LookPath("sed") + if err != nil { + return "", err + } + + lsof := exec.CommandContext(ctx, lsof_bin, "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") + awk := exec.CommandContext(ctx, awk_bin, "NR==5{print}") + sed := exec.CommandContext(ctx, sed_bin, "s/n\\//\\//") + + output, _, err := common.Pipeline(lsof, awk, sed) + + if err != nil { + return "", err + } + + ret := strings.TrimSpace(string(output)) + + return ret, nil +} + +// Cmdline returns the command line arguments of the process as a string with +// each argument separated by 0x20 ascii character. +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + r, err := callPsWithContext(ctx, "command", p.Pid, false) + if err != nil { + return "", err + } + return strings.Join(r[0], " "), err +} + +// CmdlineSlice returns the command line arguments of the process as a slice with each +// element being an argument. Because of current deficiencies in the way that the command +// line arguments are found, single arguments that have spaces in the will actually be +// reported as two separate items. In order to do something better CGO would be needed +// to use the native darwin functions. 
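// For example, a process started as `myprog --title "hello world"` comes back
// from this method as four elements, ["myprog", "--title", "hello", "world"],
// because callPsWithContext (at the bottom of this file) re-splits each ps
// output line on spaces.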
+func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + r, err := callPsWithContext(ctx, "command", p.Pid, false) + if err != nil { + return nil, err + } + return r[0], err +} +func (p *Process) CreateTime() (int64, error) { + return p.CreateTimeWithContext(context.Background()) +} + +func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) { + r, err := callPsWithContext(ctx, "etime", p.Pid, false) + if err != nil { + return 0, err + } + + elapsedSegments := strings.Split(strings.Replace(r[0][0], "-", ":", 1), ":") + var elapsedDurations []time.Duration + for i := len(elapsedSegments) - 1; i >= 0; i-- { + p, err := strconv.ParseInt(elapsedSegments[i], 10, 0) + if err != nil { + return 0, err + } + elapsedDurations = append(elapsedDurations, time.Duration(p)) + } + + var elapsed = time.Duration(elapsedDurations[0]) * time.Second + if len(elapsedDurations) > 1 { + elapsed += time.Duration(elapsedDurations[1]) * time.Minute + } + if len(elapsedDurations) > 2 { + elapsed += time.Duration(elapsedDurations[2]) * time.Hour + } + if len(elapsedDurations) > 3 { + elapsed += time.Duration(elapsedDurations[3]) * time.Hour * 24 + } + + start := time.Now().Add(-elapsed) + return start.Unix() * 1000, nil +} +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + rr, err := common.CallLsofWithContext(ctx, invoke, p.Pid, "-FR") + if err != nil { + return nil, err + } + for _, r := range rr { + if strings.HasPrefix(r, "p") { // skip if process + continue + } + l := string(r) + v, err := strconv.Atoi(strings.Replace(l, "R", "", 1)) + if err != nil { + return nil, err + } + return NewProcess(int32(v)) + } + return nil, fmt.Errorf("could not find parent line") +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + r, err := callPsWithContext(ctx, "state", p.Pid, false) + if err != nil { + return "", err + } + + return r[0][0], err +} + +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + ps, err := exec.LookPath("ps") + if err != nil { + return false, err + } + out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html + userEffectiveUID := int32(k.Eproc.Ucred.UID) + + return []int32{userEffectiveUID}, nil +} +func (p *Process) Gids() ([]int32, error) { + return 
p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]int32, 0, 3) + gids = append(gids, int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Ucred.Ngroups), int32(k.Eproc.Pcred.P_svgid)) + + return gids, nil +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError + /* + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Eproc.Tdev) + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil + */ +} +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Proc.P_nice), nil +} +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true) + if err != nil { + return 0, err + } + return int32(len(r)), nil +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + return ret, common.ErrNotImplementedError +} + +func convertCPUTimes(s string) (ret float64, err error) { + var t int + var _tmp string + if strings.Contains(s, ":") { + _t := strings.Split(s, ":") + hour, err := strconv.Atoi(_t[0]) + if err != nil { + return ret, err + } + t += hour * 60 * 100 + _tmp = _t[1] + } else { + _tmp = s + } + + _t := 
strings.Split(_tmp, ".") + if err != nil { + return ret, err + } + h, err := strconv.Atoi(_t[0]) + t += h * 100 + h, err = strconv.Atoi(_t[1]) + t += h + return float64(t) / ClockTicks, nil +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false) + + if err != nil { + return nil, err + } + + utime, err := convertCPUTimes(r[0][0]) + if err != nil { + return nil, err + } + stime, err := convertCPUTimes(r[0][1]) + if err != nil { + return nil, err + } + + ret := &cpu.TimesStat{ + CPU: "cpu", + User: utime, + System: stime, + } + return ret, nil +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false) + if err != nil { + return nil, err + } + rss, err := strconv.Atoi(r[0][0]) + if err != nil { + return nil, err + } + vms, err := strconv.Atoi(r[0][1]) + if err != nil { + return nil, err + } + pagein, err := strconv.Atoi(r[0][2]) + if err != nil { + return nil, err + } + + ret := &MemoryInfoStat{ + RSS: uint64(rss) * 1024, + VMS: uint64(vms) * 1024, + Swap: uint64(pagein), + } + + return ret, nil +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcess(pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPid("all", p.Pid) +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IsRunning() (bool, 
error) { + return p.IsRunningWithContext(context.Background()) +} + +func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { + return true, common.ErrNotImplementedError +} +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + var ret []MemoryMapsStat + return &ret, common.ErrNotImplementedError +} + +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcess(pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + + err := common.Read(br, binary.LittleEndian, &k) + if err != nil { + return k, err + } + + return k, nil +} + +// Returns a proc as defined here: +// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html +func (p *Process) getKProc() (*KinfoProc, error) { + return p.getKProcWithContext(context.Background()) +} + +func (p *Process) getKProcWithContext(ctx context.Context) (*KinfoProc, error) { + mib := []int32{CTLKern, KernProc, KernProcPID, p.Pid} + procK := KinfoProc{} + length := uint64(unsafe.Sizeof(procK)) + buf := make([]byte, length) + _, _, syserr := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if syserr != 0 { + return nil, syserr + } + k, err := parseKinfoProc(buf) + if err != nil { + return nil, err + } + + return &k, nil +} + +func NewProcess(pid int32) (*Process, error) { + p := &Process{Pid: pid} + + return p, nil +} + +// call ps command. +// Return value deletes Header line(you must not input wrong arg). +// And splited by Space. Caller have responsibility to manage. +// If passed arg pid is 0, get information from all process. +func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool) ([][]string, error) { + bin, err := exec.LookPath("ps") + if err != nil { + return [][]string{}, err + } + + var cmd []string + if pid == 0 { // will get from all processes. + cmd = []string{"-ax", "-o", arg} + } else if threadOption { + cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))} + } else { + cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))} + } + out, err := invoke.CommandWithContext(ctx, bin, cmd...) 
+ if err != nil { + return [][]string{}, err + } + lines := strings.Split(string(out), "\n") + + var ret [][]string + for _, l := range lines[1:] { + var lr []string + for _, r := range strings.Split(l, " ") { + if r == "" { + continue + } + lr = append(lr, strings.TrimSpace(r)) + } + if len(lr) != 0 { + ret = append(ret, lr) + } + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_darwin_386.go b/vendor/github.com/shirou/gopsutil/process/process_darwin_386.go new file mode 100644 index 0000000..f8e9223 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_darwin_386.go @@ -0,0 +1,234 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package process + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type UGid_t uint32 + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Eproc struct { + Paddr *uint64 + Sess *Session + Pcred Upcred + Ucred Uucred + Pad_cgo_0 [4]byte + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Pad_cgo_1 [2]byte + Tdev int32 + Tpgid int32 + Pad_cgo_2 [4]byte + Tsess *Session + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Pad_cgo_3 [2]byte + Flag int32 + Login [12]int8 + Spare [4]int32 + Pad_cgo_4 [4]byte +} + +type Proc struct{} + +type Session struct{} + +type ucred struct { + Link _Ctype_struct___0 + Ref uint64 + Posix Posix_cred + Label *Label + Audit Au_session +} + +type Uucred struct { + Ref int32 + UID uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 +} + +type Upcred struct { + Pc_lock [72]int8 + Pc_ucred *ucred + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + Pad_cgo_0 [4]byte +} + +type Vmspace struct { + Dummy int32 + Pad_cgo_0 [4]byte + Dummy2 *int8 + Dummy3 [5]int32 + Pad_cgo_1 [4]byte + Dummy4 [3]*int8 +} + +type Sigacts struct{} + +type ExternProc struct { + P_un [16]byte + P_vmspace uint64 + P_sigacts uint64 + Pad_cgo_0 [3]byte + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + Pad_cgo_1 [4]byte + User_stack uint64 + Exit_thread uint64 + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + Pad_cgo_2 [4]byte + P_wchan uint64 + P_wmesg uint64 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + Pad_cgo_3 [4]byte + P_tracep uint64 + P_siglist int32 + Pad_cgo_4 [4]byte + P_textvp uint64 + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + Pad_cgo_5 [4]byte + P_pgrp uint64 + P_addr uint64 + P_xstat uint16 + P_acflag uint16 + Pad_cgo_6 [4]byte + P_ru uint64 +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type Vnode struct{} + +type Pgrp struct{} + +type UserStruct struct{} + +type Au_session struct { + Aia_p 
*AuditinfoAddr + Mask AuMask +} + +type Posix_cred struct { + UID uint32 + Ruid uint32 + Svuid uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 + Rgid uint32 + Svgid uint32 + Gmuid uint32 + Flags int32 +} + +type Label struct{} + +type AuditinfoAddr struct { + Auid uint32 + Mask AuMask + Termid AuTidAddr + Asid int32 + Flags uint64 +} +type AuMask struct { + Success uint32 + Failure uint32 +} +type AuTidAddr struct { + Port int32 + Type uint32 + Addr [4]uint32 +} + +type UcredQueue struct { + Next *ucred + Prev **ucred +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/process/process_darwin_amd64.go new file mode 100644 index 0000000..f8e9223 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_darwin_amd64.go @@ -0,0 +1,234 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package process + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type UGid_t uint32 + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Eproc struct { + Paddr *uint64 + Sess *Session + Pcred Upcred + Ucred Uucred + Pad_cgo_0 [4]byte + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Pad_cgo_1 [2]byte + Tdev int32 + Tpgid int32 + Pad_cgo_2 [4]byte + Tsess *Session + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Pad_cgo_3 [2]byte + Flag int32 + Login [12]int8 + Spare [4]int32 + Pad_cgo_4 [4]byte +} + +type Proc struct{} + +type Session struct{} + +type ucred struct { + Link _Ctype_struct___0 + Ref uint64 + Posix Posix_cred + Label *Label + Audit Au_session +} + +type Uucred struct { + Ref int32 + UID uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 +} + +type Upcred struct { + Pc_lock [72]int8 + Pc_ucred *ucred + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + Pad_cgo_0 [4]byte +} + +type Vmspace struct { + Dummy int32 + Pad_cgo_0 [4]byte + Dummy2 *int8 + Dummy3 [5]int32 + Pad_cgo_1 [4]byte + Dummy4 [3]*int8 +} + +type Sigacts struct{} + +type ExternProc struct { + P_un [16]byte + P_vmspace uint64 + P_sigacts uint64 + Pad_cgo_0 [3]byte + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + Pad_cgo_1 [4]byte + User_stack uint64 + Exit_thread uint64 + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + Pad_cgo_2 [4]byte + P_wchan uint64 + P_wmesg uint64 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + Pad_cgo_3 [4]byte + P_tracep uint64 + P_siglist int32 + Pad_cgo_4 [4]byte + P_textvp uint64 + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + Pad_cgo_5 [4]byte + P_pgrp uint64 + P_addr uint64 + P_xstat uint16 + P_acflag uint16 + Pad_cgo_6 [4]byte + P_ru uint64 
+} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type Vnode struct{} + +type Pgrp struct{} + +type UserStruct struct{} + +type Au_session struct { + Aia_p *AuditinfoAddr + Mask AuMask +} + +type Posix_cred struct { + UID uint32 + Ruid uint32 + Svuid uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 + Rgid uint32 + Svgid uint32 + Gmuid uint32 + Flags int32 +} + +type Label struct{} + +type AuditinfoAddr struct { + Auid uint32 + Mask AuMask + Termid AuTidAddr + Asid int32 + Flags uint64 +} +type AuMask struct { + Success uint32 + Failure uint32 +} +type AuTidAddr struct { + Port int32 + Type uint32 + Addr [4]uint32 +} + +type UcredQueue struct { + Next *ucred + Prev **ucred +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/process/process_fallback.go new file mode 100644 index 0000000..f95eca6 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_fallback.go @@ -0,0 +1,325 @@ +// +build !darwin,!linux,!freebsd,!openbsd,!windows + +package process + +import ( + "context" + "syscall" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/net" +) + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +type MemoryInfoExStat struct { +} + +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + return []int32{}, common.ErrNotImplementedError +} + +func Processes() ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func NewProcess(pid int32) (*Process, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return []string{}, common.ErrNotImplementedError +} +func (p *Process) CreateTime() (int64, error) { + return p.CreateTimeWithContext(context.Background()) +} + +func (p *Process) 
CreateTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + return []int32{}, common.ErrNotImplementedError +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + return []int32{}, common.ErrNotImplementedError +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) 
NumThreadsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return []OpenFilesStat{}, common.ErrNotImplementedError +} +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return []net.ConnectionStat{}, common.ErrNotImplementedError +} +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return []net.IOCountersStat{}, common.ErrNotImplementedError +} +func (p *Process) IsRunning() (bool, error) { + return p.IsRunningWithContext(context.Background()) +} + +func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { + return true, common.ErrNotImplementedError +} +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) SendSignal(sig syscall.Signal) error { + return p.SendSignalWithContext(context.Background(), sig) +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { + return common.ErrNotImplementedError +} +func (p *Process) Suspend() error 
{ + return p.SuspendWithContext(context.Background()) +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Resume() error { + return p.ResumeWithContext(context.Background()) +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Terminate() error { + return p.TerminateWithContext(context.Background()) +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Kill() error { + return p.KillWithContext(context.Background()) +} + +func (p *Process) KillWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Username() (string, error) { + return p.UsernameWithContext(context.Background()) +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/process/process_freebsd.go new file mode 100644 index 0000000..e807e35 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_freebsd.go @@ -0,0 +1,509 @@ +// +build freebsd + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "os/exec" + "strconv" + "strings" + + cpu "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + net "github.com/shirou/gopsutil/net" + "golang.org/x/sys/unix" +) + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { +} + +type MemoryMapsStat struct { +} + +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + procs, err := Processes() + if err != nil { + return ret, nil + } + + for _, p := range procs { + ret = append(ret, p.Pid) + } + + return ret, nil +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Ppid, nil +} +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + return common.IntToString(k.Comm[:]), nil +} +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + ret := strings.FieldsFunc(string(buf), func(r rune) bool { + if r == '\u0000' { + return true + } + return false + }) + + return strings.Join(ret, " "), nil +} + +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + mib := 
[]int32{CTLKern, KernProc, KernProcArgs, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + if len(buf) == 0 { + return nil, nil + } + if buf[len(buf)-1] == 0 { + buf = buf[:len(buf)-1] + } + parts := bytes.Split(buf, []byte{0}) + var strParts []string + for _, p := range parts { + strParts = append(strParts, string(p)) + } + + return strParts, nil +} +func (p *Process) CreateTime() (int64, error) { + return p.CreateTimeWithContext(context.Background()) +} + +func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + return p, common.ErrNotImplementedError +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + var s string + switch k.Stat { + case SIDL: + s = "I" + case SRUN: + s = "R" + case SSLEEP: + s = "S" + case SSTOP: + s = "T" + case SZOMB: + s = "Z" + case SWAIT: + s = "W" + case SLOCK: + s = "L" + } + + return s, nil +} + +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + ps, err := exec.LookPath("ps") + if err != nil { + return false, err + } + out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + uids := make([]int32, 0, 3) + + uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + + return uids, nil +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]int32, 0, 3) + gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + + return gids, nil +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Tdev) + + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil +} +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Nice), nil +} +func (p *Process) IOnice() (int32, error) { + return 
p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &IOCountersStat{ + ReadCount: uint64(k.Rusage.Inblock), + WriteCount: uint64(k.Rusage.Oublock), + }, nil +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Numthreads, nil +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + return ret, common.ErrNotImplementedError +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &cpu.TimesStat{ + CPU: "cpu", + User: float64(k.Rusage.Utime.Sec) + float64(k.Rusage.Utime.Usec)/1000000, + System: float64(k.Rusage.Stime.Sec) + float64(k.Rusage.Stime.Usec)/1000000, + }, nil +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + v, err := unix.Sysctl("vm.stats.vm.v_page_size") + if err != nil { + return nil, err + } + pageSize := common.LittleEndian.Uint16([]byte(v)) + + return &MemoryInfoStat{ + RSS: uint64(k.Rssize) * uint64(pageSize), + VMS: uint64(k.Size), + }, nil +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return 
p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcess(pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IsRunning() (bool, error) { + return p.IsRunningWithContext(context.Background()) +} + +func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { + return true, common.ErrNotImplementedError +} +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + var ret []MemoryMapsStat + return &ret, common.ErrNotImplementedError +} + +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + results := []*Process{} + + mib := []int32{CTLKern, KernProc, KernProcProc, 0} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return results, err + } + + // get kinfo_proc size + count := int(length / uint64(sizeOfKinfoProc)) + + // parse buf to procs + for i := 0; i < count; i++ { + b := buf[i*sizeOfKinfoProc : (i+1)*sizeOfKinfoProc] + k, err := parseKinfoProc(b) + if err != nil { + continue + } + p, err := NewProcess(int32(k.Pid)) + if err != nil { + continue + } + + results = append(results, p) + } + + return results, nil +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + err := common.Read(br, binary.LittleEndian, &k) + return k, err +} + +func (p *Process) getKProc() (*KinfoProc, error) { + return p.getKProcWithContext(context.Background()) +} + +func (p *Process) getKProcWithContext(ctx context.Context) (*KinfoProc, error) { + mib := []int32{CTLKern, KernProc, KernProcPID, p.Pid} + + buf, length, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } 
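+ // Descriptive note (not in upstream gopsutil): the length returned by the sysctl
+ // call is sanity-checked against sizeOfKinfoProc, i.e. exactly one kinfo_proc
+ // record, before the buffer is decoded by parseKinfoProc.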
+ if length != sizeOfKinfoProc { + return nil, err + } + + k, err := parseKinfoProc(buf) + if err != nil { + return nil, err + } + return &k, nil +} + +func NewProcess(pid int32) (*Process, error) { + p := &Process{Pid: pid} + + return p, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_freebsd_386.go b/vendor/github.com/shirou/gopsutil/process/process_freebsd_386.go new file mode 100644 index 0000000..08ab333 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_freebsd_386.go @@ -0,0 +1,192 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x300 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int32 /* pargs */ + Paddr int32 /* proc */ + Addr int32 /* user */ + Tracep int32 /* vnode */ + Textvp int32 /* vnode */ + Fd int32 /* filedesc */ + Vmspace int32 /* vmspace */ + Wchan int32 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint32 + Rssize int32 + Swrss int32 + Tsize int32 + Dsize int32 + Ssize int32 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int32 + Kiflag int32 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [7]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int32 /* pcb */ + Kstack int32 + Udata int32 + Tdaddr int32 /* thread */ + Spareptrs [6]int32 + Sparelongs [12]int32 + Sflag int32 + Tdflags int32 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git 
a/vendor/github.com/shirou/gopsutil/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/process/process_freebsd_amd64.go new file mode 100644 index 0000000..560e627 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_freebsd_amd64.go @@ -0,0 +1,192 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int64 /* pargs */ + Paddr int64 /* proc */ + Addr int64 /* user */ + Tracep int64 /* vnode */ + Textvp int64 /* vnode */ + Fd int64 /* filedesc */ + Vmspace int64 /* vmspace */ + Wchan int64 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint64 + Rssize int64 + Swrss int64 + Tsize int64 + Dsize int64 + Ssize int64 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int64 + Kiflag int64 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [7]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int64 /* pcb */ + Kstack int64 + Udata int64 + Tdaddr int64 /* thread */ + Spareptrs [6]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/process/process_freebsd_arm.go new file mode 100644 index 0000000..81ae0b9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_freebsd_arm.go @@ -0,0 +1,192 @@ +// Created by cgo 
-godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int32 + Max int32 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int32 /* pargs */ + Paddr int32 /* proc */ + Addr int32 /* user */ + Tracep int32 /* vnode */ + Textvp int32 /* vnode */ + Fd int32 /* filedesc */ + Vmspace int32 /* vmspace */ + Wchan int32 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint32 + Rssize int32 + Swrss int32 + Tsize int32 + Dsize int32 + Ssize int32 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int32 + Kiflag int32 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [4]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int32 /* pcb */ + Kstack int32 + Udata int32 + Tdaddr int32 /* thread */ + Spareptrs [6]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_linux.go b/vendor/github.com/shirou/gopsutil/process/process_linux.go new file mode 100644 index 0000000..00fdd33 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_linux.go @@ -0,0 +1,1345 @@ +// +build linux + +package process + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/internal/common" + "github.com/shirou/gopsutil/net" + 
"golang.org/x/sys/unix" +) + +var PageSize = uint64(os.Getpagesize()) + +const ( + PrioProcess = 0 // linux/resource.h + ClockTicks = 100 // C.sysconf(C._SC_CLK_TCK) +) + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { + RSS uint64 `json:"rss"` // bytes + VMS uint64 `json:"vms"` // bytes + Shared uint64 `json:"shared"` // bytes + Text uint64 `json:"text"` // bytes + Lib uint64 `json:"lib"` // bytes + Data uint64 `json:"data"` // bytes + Dirty uint64 `json:"dirty"` // bytes +} + +func (m MemoryInfoExStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +// String returns JSON value of the process. +func (m MemoryMapsStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +// NewProcess creates a new Process instance, it only stores the pid and +// checks that the process exists. Other method on Process can be used +// to get more information about the process. An error will be returned +// if the process does not exist. +func NewProcess(pid int32) (*Process, error) { + p := &Process{ + Pid: int32(pid), + } + file, err := os.Open(common.HostProc(strconv.Itoa(int(p.Pid)))) + defer file.Close() + return p, err +} + +// Ppid returns Parent Process ID of the process. +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + _, ppid, _, _, _, _, _, err := p.fillFromStat() + if err != nil { + return -1, err + } + return ppid, nil +} + +// Name returns name of the process. +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + if p.name == "" { + if err := p.fillFromStatus(); err != nil { + return "", err + } + } + return p.name, nil +} + +// Tgid returns tgid, a Linux-synonym for user-space Pid +func (p *Process) Tgid() (int32, error) { + if p.tgid == 0 { + if err := p.fillFromStatus(); err != nil { + return 0, err + } + } + return p.tgid, nil +} + +// Exe returns executable path of the process. +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return p.fillFromExe() +} + +// Cmdline returns the command line arguments of the process as a string with +// each argument separated by 0x20 ascii character. +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + return p.fillFromCmdline() +} + +// CmdlineSlice returns the command line arguments of the process as a slice with each +// element being an argument. +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return p.fillSliceFromCmdline() +} + +// CreateTime returns created time of the process in milliseconds since the epoch, in UTC. 
+func (p *Process) CreateTime() (int64, error) { + return p.CreateTimeWithContext(context.Background()) +} + +func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) { + _, _, _, createTime, _, _, _, err := p.fillFromStat() + if err != nil { + return 0, err + } + return createTime, nil +} + +// Cwd returns current working directory of the process. +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return p.fillFromCwd() +} + +// Parent returns parent Process of the process. +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + err := p.fillFromStatus() + if err != nil { + return nil, err + } + if p.parent == 0 { + return nil, fmt.Errorf("wrong number of parents") + } + return NewProcess(p.parent) +} + +// Status returns the process status. +// Return value could be one of these. +// R: Running S: Sleep T: Stop I: Idle +// Z: Zombie W: Wait L: Lock +// The charactor is same within all supported platforms. +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + err := p.fillFromStatus() + if err != nil { + return "", err + } + return p.status, nil +} + +// Foreground returns true if the process is in foreground, false otherwise. +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + statPath := common.HostProc(strconv.Itoa(int(pid)), "stat") + contents, err := ioutil.ReadFile(statPath) + if err != nil { + return false, err + } + fields := strings.Fields(string(contents)) + if len(fields) < 8 { + return false, fmt.Errorf("insufficient data in %s", statPath) + } + pgid := fields[4] + tpgid := fields[7] + return pgid == tpgid, nil +} + +// Uids returns user ids of the process as a slice of the int +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + err := p.fillFromStatus() + if err != nil { + return []int32{}, err + } + return p.uids, nil +} + +// Gids returns group ids of the process as a slice of the int +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + err := p.fillFromStatus() + if err != nil { + return []int32{}, err + } + return p.gids, nil +} + +// Terminal returns a terminal which is associated with the process. +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + t, _, _, _, _, _, _, err := p.fillFromStat() + if err != nil { + return "", err + } + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + terminal := termmap[t] + return terminal, nil +} + +// Nice returns a nice value (priority). +// Notice: gopsutil can not set nice value. 
+func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + _, _, _, _, _, nice, _, err := p.fillFromStat() + if err != nil { + return 0, err + } + return nice, nil +} + +// IOnice returns process I/O nice value (priority). +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +// Rlimit returns Resource Limits. +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return p.RlimitUsage(false) +} + +// RlimitUsage returns Resource Limits. +// If gatherUsed is true, the currently used value will be gathered and added +// to the resulting RlimitStat. +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + rlimits, err := p.fillFromLimits() + if !gatherUsed || err != nil { + return rlimits, err + } + + _, _, _, _, rtprio, nice, _, err := p.fillFromStat() + if err != nil { + return nil, err + } + if err := p.fillFromStatus(); err != nil { + return nil, err + } + + for i := range rlimits { + rs := &rlimits[i] + switch rs.Resource { + case RLIMIT_CPU: + times, err := p.Times() + if err != nil { + return nil, err + } + rs.Used = uint64(times.User + times.System) + case RLIMIT_DATA: + rs.Used = uint64(p.memInfo.Data) + case RLIMIT_STACK: + rs.Used = uint64(p.memInfo.Stack) + case RLIMIT_RSS: + rs.Used = uint64(p.memInfo.RSS) + case RLIMIT_NOFILE: + n, err := p.NumFDs() + if err != nil { + return nil, err + } + rs.Used = uint64(n) + case RLIMIT_MEMLOCK: + rs.Used = uint64(p.memInfo.Locked) + case RLIMIT_AS: + rs.Used = uint64(p.memInfo.VMS) + case RLIMIT_LOCKS: + //TODO we can get the used value from /proc/$pid/locks. But linux doesn't enforce it, so not a high priority. + case RLIMIT_SIGPENDING: + rs.Used = p.sigInfo.PendingProcess + case RLIMIT_NICE: + // The rlimit for nice is a little unusual, in that 0 means the niceness cannot be decreased beyond the current value, but it can be increased. + // So effectively: if rs.Soft == 0 { rs.Soft = rs.Used } + rs.Used = uint64(nice) + case RLIMIT_RTPRIO: + rs.Used = uint64(rtprio) + } + } + + return rlimits, err +} + +// IOCounters returns IO Counters. +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return p.fillFromIO() +} + +// NumCtxSwitches returns the number of the context switches of the process. +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + err := p.fillFromStatus() + if err != nil { + return nil, err + } + return p.numCtxSwitches, nil +} + +// NumFDs returns the number of File Descriptors used by the process. 
+func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + _, fnames, err := p.fillFromfdList() + return int32(len(fnames)), err +} + +// NumThreads returns the number of threads used by the process. +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + err := p.fillFromStatus() + if err != nil { + return 0, err + } + return p.numThreads, nil +} + +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + taskPath := common.HostProc(strconv.Itoa(int(p.Pid)), "task") + + tids, err := readPidsFromDir(taskPath) + if err != nil { + return nil, err + } + + for _, tid := range tids { + _, _, cpuTimes, _, _, _, _, err := p.fillFromTIDStat(tid) + if err != nil { + return nil, err + } + ret[tid] = cpuTimes + } + + return ret, nil +} + +// Times returns CPU times of the process. +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + _, _, cpuTimes, _, _, _, _, err := p.fillFromStat() + if err != nil { + return nil, err + } + return cpuTimes, nil +} + +// CPUAffinity returns CPU affinity of the process. +// +// Notice: Not implemented yet. +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +// MemoryInfo returns platform in-dependend memory information, such as RSS, VMS and Swap +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + meminfo, _, err := p.fillFromStatm() + if err != nil { + return nil, err + } + return meminfo, nil +} + +// MemoryInfoEx returns platform dependend memory information. +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + _, memInfoEx, err := p.fillFromStatm() + if err != nil { + return nil, err + } + return memInfoEx, nil +} + +// PageFaultsInfo returns the process's page fault counters +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + _, _, _, _, _, _, pageFaults, err := p.fillFromStat() + if err != nil { + return nil, err + } + return pageFaults, nil + +} + +// Children returns a slice of Process of the process. 
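+// Descriptive note (not in upstream gopsutil): child PIDs are discovered with pgrep
+// via common.CallPgrepWithContext; when pgrep reports no children, ErrorNoChildren
+// is returned instead of an empty slice.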
+func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + if pids == nil || len(pids) == 0 { + return nil, ErrorNoChildren + } + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcess(pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +// OpenFiles returns a slice of OpenFilesStat opend by the process. +// OpenFilesStat includes a file path and file descriptor. +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + _, ofs, err := p.fillFromfd() + if err != nil { + return nil, err + } + ret := make([]OpenFilesStat, len(ofs)) + for i, o := range ofs { + ret[i] = *o + } + + return ret, nil +} + +// Connections returns a slice of net.ConnectionStat used by the process. +// This returns all kind of the connection. This measn TCP, UDP or UNIX. +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPid("all", p.Pid) +} + +// NetIOCounters returns NetIOCounters of the process. +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + filename := common.HostProc(strconv.Itoa(int(p.Pid)), "net/dev") + return net.IOCountersByFile(pernic, filename) +} + +// IsRunning returns whether the process is running or not. +// Not implemented yet. 
+func (p *Process) IsRunning() (bool, error) { + return p.IsRunningWithContext(context.Background()) +} + +func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { + return true, common.ErrNotImplementedError +} + +// MemoryMaps get memory maps from /proc/(pid)/smaps +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + pid := p.Pid + var ret []MemoryMapsStat + if grouped { + ret = make([]MemoryMapsStat, 1) + } + smapsPath := common.HostProc(strconv.Itoa(int(pid)), "smaps") + contents, err := ioutil.ReadFile(smapsPath) + if err != nil { + return nil, err + } + lines := strings.Split(string(contents), "\n") + + // function of parsing a block + getBlock := func(first_line []string, block []string) (MemoryMapsStat, error) { + m := MemoryMapsStat{} + m.Path = first_line[len(first_line)-1] + + for _, line := range block { + if strings.Contains(line, "VmFlags") { + continue + } + field := strings.Split(line, ":") + if len(field) < 2 { + continue + } + v := strings.Trim(field[1], " kB") // remove last "kB" + t, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return m, err + } + + switch field[0] { + case "Size": + m.Size = t + case "Rss": + m.Rss = t + case "Pss": + m.Pss = t + case "Shared_Clean": + m.SharedClean = t + case "Shared_Dirty": + m.SharedDirty = t + case "Private_Clean": + m.PrivateClean = t + case "Private_Dirty": + m.PrivateDirty = t + case "Referenced": + m.Referenced = t + case "Anonymous": + m.Anonymous = t + case "Swap": + m.Swap = t + } + } + return m, nil + } + + blocks := make([]string, 16) + for _, line := range lines { + field := strings.Split(line, " ") + if strings.HasSuffix(field[0], ":") == false { + // new block section + if len(blocks) > 0 { + g, err := getBlock(field, blocks) + if err != nil { + return &ret, err + } + if grouped { + ret[0].Size += g.Size + ret[0].Rss += g.Rss + ret[0].Pss += g.Pss + ret[0].SharedClean += g.SharedClean + ret[0].SharedDirty += g.SharedDirty + ret[0].PrivateClean += g.PrivateClean + ret[0].PrivateDirty += g.PrivateDirty + ret[0].Referenced += g.Referenced + ret[0].Anonymous += g.Anonymous + ret[0].Swap += g.Swap + } else { + ret = append(ret, g) + } + } + // starts new block + blocks = make([]string, 16) + } else { + blocks = append(blocks, line) + } + } + + return &ret, nil +} + +/** +** Internal functions +**/ + +func limitToInt(val string) (int32, error) { + if val == "unlimited" { + return math.MaxInt32, nil + } else { + res, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return 0, err + } + return int32(res), nil + } +} + +// Get num_fds from /proc/(pid)/limits +func (p *Process) fillFromLimits() ([]RlimitStat, error) { + return p.fillFromLimitsWithContext(context.Background()) +} + +func (p *Process) fillFromLimitsWithContext(ctx context.Context) ([]RlimitStat, error) { + pid := p.Pid + limitsFile := common.HostProc(strconv.Itoa(int(pid)), "limits") + d, err := os.Open(limitsFile) + if err != nil { + return nil, err + } + defer d.Close() + + var limitStats []RlimitStat + + limitsScanner := bufio.NewScanner(d) + for limitsScanner.Scan() { + var statItem RlimitStat + + str := strings.Fields(limitsScanner.Text()) + + // Remove the header line + if strings.Contains(str[len(str)-1], "Units") { + continue + } + + // Assert that last item is a Hard limit + statItem.Hard, err = limitToInt(str[len(str)-1]) + if 
err != nil { + // On error remove last item an try once again since it can be unit or header line + str = str[:len(str)-1] + statItem.Hard, err = limitToInt(str[len(str)-1]) + if err != nil { + return nil, err + } + } + // Remove last item from string + str = str[:len(str)-1] + + //Now last item is a Soft limit + statItem.Soft, err = limitToInt(str[len(str)-1]) + if err != nil { + return nil, err + } + // Remove last item from string + str = str[:len(str)-1] + + //The rest is a stats name + resourceName := strings.Join(str, " ") + switch resourceName { + case "Max cpu time": + statItem.Resource = RLIMIT_CPU + case "Max file size": + statItem.Resource = RLIMIT_FSIZE + case "Max data size": + statItem.Resource = RLIMIT_DATA + case "Max stack size": + statItem.Resource = RLIMIT_STACK + case "Max core file size": + statItem.Resource = RLIMIT_CORE + case "Max resident set": + statItem.Resource = RLIMIT_RSS + case "Max processes": + statItem.Resource = RLIMIT_NPROC + case "Max open files": + statItem.Resource = RLIMIT_NOFILE + case "Max locked memory": + statItem.Resource = RLIMIT_MEMLOCK + case "Max address space": + statItem.Resource = RLIMIT_AS + case "Max file locks": + statItem.Resource = RLIMIT_LOCKS + case "Max pending signals": + statItem.Resource = RLIMIT_SIGPENDING + case "Max msgqueue size": + statItem.Resource = RLIMIT_MSGQUEUE + case "Max nice priority": + statItem.Resource = RLIMIT_NICE + case "Max realtime priority": + statItem.Resource = RLIMIT_RTPRIO + case "Max realtime timeout": + statItem.Resource = RLIMIT_RTTIME + default: + continue + } + + limitStats = append(limitStats, statItem) + } + + if err := limitsScanner.Err(); err != nil { + return nil, err + } + + return limitStats, nil +} + +// Get list of /proc/(pid)/fd files +func (p *Process) fillFromfdList() (string, []string, error) { + return p.fillFromfdListWithContext(context.Background()) +} + +func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []string, error) { + pid := p.Pid + statPath := common.HostProc(strconv.Itoa(int(pid)), "fd") + d, err := os.Open(statPath) + if err != nil { + return statPath, []string{}, err + } + defer d.Close() + fnames, err := d.Readdirnames(-1) + return statPath, fnames, err +} + +// Get num_fds from /proc/(pid)/fd +func (p *Process) fillFromfd() (int32, []*OpenFilesStat, error) { + return p.fillFromfdWithContext(context.Background()) +} + +func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []*OpenFilesStat, error) { + statPath, fnames, err := p.fillFromfdList() + if err != nil { + return 0, nil, err + } + numFDs := int32(len(fnames)) + + var openfiles []*OpenFilesStat + for _, fd := range fnames { + fpath := filepath.Join(statPath, fd) + filepath, err := os.Readlink(fpath) + if err != nil { + continue + } + t, err := strconv.ParseUint(fd, 10, 64) + if err != nil { + return numFDs, openfiles, err + } + o := &OpenFilesStat{ + Path: filepath, + Fd: t, + } + openfiles = append(openfiles, o) + } + + return numFDs, openfiles, nil +} + +// Get cwd from /proc/(pid)/cwd +func (p *Process) fillFromCwd() (string, error) { + return p.fillFromCwdWithContext(context.Background()) +} + +func (p *Process) fillFromCwdWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cwdPath := common.HostProc(strconv.Itoa(int(pid)), "cwd") + cwd, err := os.Readlink(cwdPath) + if err != nil { + return "", err + } + return string(cwd), nil +} + +// Get exe from /proc/(pid)/exe +func (p *Process) fillFromExe() (string, error) { + return 
p.fillFromExeWithContext(context.Background()) +} + +func (p *Process) fillFromExeWithContext(ctx context.Context) (string, error) { + pid := p.Pid + exePath := common.HostProc(strconv.Itoa(int(pid)), "exe") + exe, err := os.Readlink(exePath) + if err != nil { + return "", err + } + return string(exe), nil +} + +// Get cmdline from /proc/(pid)/cmdline +func (p *Process) fillFromCmdline() (string, error) { + return p.fillFromCmdlineWithContext(context.Background()) +} + +func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline") + cmdline, err := ioutil.ReadFile(cmdPath) + if err != nil { + return "", err + } + ret := strings.FieldsFunc(string(cmdline), func(r rune) bool { + if r == '\u0000' { + return true + } + return false + }) + + return strings.Join(ret, " "), nil +} + +func (p *Process) fillSliceFromCmdline() ([]string, error) { + return p.fillSliceFromCmdlineWithContext(context.Background()) +} + +func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) { + pid := p.Pid + cmdPath := common.HostProc(strconv.Itoa(int(pid)), "cmdline") + cmdline, err := ioutil.ReadFile(cmdPath) + if err != nil { + return nil, err + } + if len(cmdline) == 0 { + return nil, nil + } + if cmdline[len(cmdline)-1] == 0 { + cmdline = cmdline[:len(cmdline)-1] + } + parts := bytes.Split(cmdline, []byte{0}) + var strParts []string + for _, p := range parts { + strParts = append(strParts, string(p)) + } + + return strParts, nil +} + +// Get IO status from /proc/(pid)/io +func (p *Process) fillFromIO() (*IOCountersStat, error) { + return p.fillFromIOWithContext(context.Background()) +} + +func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, error) { + pid := p.Pid + ioPath := common.HostProc(strconv.Itoa(int(pid)), "io") + ioline, err := ioutil.ReadFile(ioPath) + if err != nil { + return nil, err + } + lines := strings.Split(string(ioline), "\n") + ret := &IOCountersStat{} + + for _, line := range lines { + field := strings.Fields(line) + if len(field) < 2 { + continue + } + t, err := strconv.ParseUint(field[1], 10, 64) + if err != nil { + return nil, err + } + param := field[0] + if strings.HasSuffix(param, ":") { + param = param[:len(param)-1] + } + switch param { + case "syscr": + ret.ReadCount = t + case "syscw": + ret.WriteCount = t + case "read_bytes": + ret.ReadBytes = t + case "write_bytes": + ret.WriteBytes = t + } + } + + return ret, nil +} + +// Get memory info from /proc/(pid)/statm +func (p *Process) fillFromStatm() (*MemoryInfoStat, *MemoryInfoExStat, error) { + return p.fillFromStatmWithContext(context.Background()) +} + +func (p *Process) fillFromStatmWithContext(ctx context.Context) (*MemoryInfoStat, *MemoryInfoExStat, error) { + pid := p.Pid + memPath := common.HostProc(strconv.Itoa(int(pid)), "statm") + contents, err := ioutil.ReadFile(memPath) + if err != nil { + return nil, nil, err + } + fields := strings.Split(string(contents), " ") + + vms, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, nil, err + } + rss, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, nil, err + } + memInfo := &MemoryInfoStat{ + RSS: rss * PageSize, + VMS: vms * PageSize, + } + + shared, err := strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, nil, err + } + text, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, nil, err + } + lib, err := 
strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, nil, err + } + dirty, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, nil, err + } + + memInfoEx := &MemoryInfoExStat{ + RSS: rss * PageSize, + VMS: vms * PageSize, + Shared: shared * PageSize, + Text: text * PageSize, + Lib: lib * PageSize, + Dirty: dirty * PageSize, + } + + return memInfo, memInfoEx, nil +} + +// Get various status from /proc/(pid)/status +func (p *Process) fillFromStatus() error { + return p.fillFromStatusWithContext(context.Background()) +} + +func (p *Process) fillFromStatusWithContext(ctx context.Context) error { + pid := p.Pid + statPath := common.HostProc(strconv.Itoa(int(pid)), "status") + contents, err := ioutil.ReadFile(statPath) + if err != nil { + return err + } + lines := strings.Split(string(contents), "\n") + p.numCtxSwitches = &NumCtxSwitchesStat{} + p.memInfo = &MemoryInfoStat{} + p.sigInfo = &SignalInfoStat{} + for _, line := range lines { + tabParts := strings.SplitN(line, "\t", 2) + if len(tabParts) < 2 { + continue + } + value := tabParts[1] + switch strings.TrimRight(tabParts[0], ":") { + case "Name": + p.name = strings.Trim(value, " \t") + if len(p.name) >= 15 { + cmdlineSlice, err := p.CmdlineSlice() + if err != nil { + return err + } + if len(cmdlineSlice) > 0 { + extendedName := filepath.Base(cmdlineSlice[0]) + if strings.HasPrefix(extendedName, p.name) { + p.name = extendedName + } else { + p.name = cmdlineSlice[0] + } + } + } + case "State": + p.status = value[0:1] + case "PPid", "Ppid": + pval, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.parent = int32(pval) + case "Tgid": + pval, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.tgid = int32(pval) + case "Uid": + p.uids = make([]int32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.uids = append(p.uids, int32(v)) + } + case "Gid": + p.gids = make([]int32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.gids = append(p.gids, int32(v)) + } + case "Threads": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.numThreads = int32(v) + case "voluntary_ctxt_switches": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + p.numCtxSwitches.Voluntary = v + case "nonvoluntary_ctxt_switches": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + p.numCtxSwitches.Involuntary = v + case "VmRSS": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.RSS = v * 1024 + case "VmSize": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.VMS = v * 1024 + case "VmSwap": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Swap = v * 1024 + case "VmHWM": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.HWM = v * 1024 + case "VmData": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Data = v * 1024 + case "VmStk": + value 
:= strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Stack = v * 1024 + case "VmLck": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Locked = v * 1024 + case "SigPnd": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.PendingThread = v + case "ShdPnd": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.PendingProcess = v + case "SigBlk": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Blocked = v + case "SigIgn": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Ignored = v + case "SigCgt": + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Caught = v + } + + } + return nil +} + +func (p *Process) fillFromTIDStat(tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + return p.fillFromTIDStatWithContext(context.Background(), tid) +} + +func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + pid := p.Pid + var statPath string + + if tid == -1 { + statPath = common.HostProc(strconv.Itoa(int(pid)), "stat") + } else { + statPath = common.HostProc(strconv.Itoa(int(pid)), "task", strconv.Itoa(int(tid)), "stat") + } + + contents, err := ioutil.ReadFile(statPath) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + fields := strings.Fields(string(contents)) + + i := 1 + for !strings.HasSuffix(fields[i], ")") { + i++ + } + + terminal, err := strconv.ParseUint(fields[i+5], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + ppid, err := strconv.ParseInt(fields[i+2], 10, 32) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + utime, err := strconv.ParseFloat(fields[i+12], 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + stime, err := strconv.ParseFloat(fields[i+13], 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + cpuTimes := &cpu.TimesStat{ + CPU: "cpu", + User: float64(utime / ClockTicks), + System: float64(stime / ClockTicks), + } + + bootTime, _ := host.BootTime() + t, err := strconv.ParseUint(fields[i+20], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + ctime := (t / uint64(ClockTicks)) + uint64(bootTime) + createTime := int64(ctime * 1000) + + rtpriority, err := strconv.ParseInt(fields[i+16], 10, 32) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + if rtpriority < 0 { + rtpriority = rtpriority*-1 - 1 + } else { + rtpriority = 0 + } + + // p.Nice = mustParseInt32(fields[18]) + // use syscall instead of parse Stat file + snice, _ := unix.Getpriority(PrioProcess, int(pid)) + nice := int32(snice) // FIXME: is this true? 
+ + minFault, err := strconv.ParseUint(fields[i+8], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + cMinFault, err := strconv.ParseUint(fields[i+9], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + majFault, err := strconv.ParseUint(fields[i+10], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + cMajFault, err := strconv.ParseUint(fields[i+11], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + faults := &PageFaultsStat{ + MinorFaults: minFault, + MajorFaults: majFault, + ChildMinorFaults: cMinFault, + ChildMajorFaults: cMajFault, + } + + return terminal, int32(ppid), cpuTimes, createTime, uint32(rtpriority), nice, faults, nil +} + +func (p *Process) fillFromStat() (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + return p.fillFromStatWithContext(context.Background()) +} + +func (p *Process) fillFromStatWithContext(ctx context.Context) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + return p.fillFromTIDStat(-1) +} + +// Pids returns a slice of process ID list which are running now. +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + return readPidsFromDir(common.HostProc()) +} + +// Process returns a slice of pointers to Process structs for all +// currently running processes. +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcess(pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func readPidsFromDir(path string) ([]int32, error) { + var ret []int32 + + d, err := os.Open(path) + if err != nil { + return nil, err + } + defer d.Close() + + fnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, fname := range fnames { + pid, err := strconv.ParseInt(fname, 10, 32) + if err != nil { + // if not numeric name, just skip + continue + } + ret = append(ret, int32(pid)) + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/process/process_openbsd.go new file mode 100644 index 0000000..9e01ea7 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_openbsd.go @@ -0,0 +1,537 @@ +// +build openbsd + +package process + +import ( + "C" + "bytes" + "context" + "encoding/binary" + "os/exec" + "strconv" + "strings" + "unsafe" + + cpu "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + mem "github.com/shirou/gopsutil/mem" + net "github.com/shirou/gopsutil/net" + "golang.org/x/sys/unix" +) + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { +} + +type MemoryMapsStat struct { +} + +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + procs, err := Processes() + if err != nil { + return ret, nil + } + + for _, p := range procs { + ret = append(ret, p.Pid) + } + + return ret, nil +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if 
err != nil { + return 0, err + } + + return k.Ppid, nil +} +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + return common.IntToString(k.Comm[:]), nil +} +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + mib := []int32{CTLKern, KernProcArgs, p.Pid, KernProcArgv} + buf, _, err := common.CallSyscall(mib) + + if err != nil { + return nil, err + } + + argc := 0 + argvp := unsafe.Pointer(&buf[0]) + argv := *(**C.char)(unsafe.Pointer(argvp)) + size := unsafe.Sizeof(argv) + var strParts []string + + for argv != nil { + strParts = append(strParts, C.GoString(argv)) + + argc++ + argv = *(**C.char)(unsafe.Pointer(uintptr(argvp) + uintptr(argc)*size)) + } + return strParts, nil +} + +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + argv, err := p.CmdlineSlice() + if err != nil { + return "", err + } + return strings.Join(argv, " "), nil +} + +func (p *Process) CreateTime() (int64, error) { + return p.CreateTimeWithContext(context.Background()) +} + +func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + return p, common.ErrNotImplementedError +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + var s string + switch k.Stat { + case SIDL: + case SRUN: + case SONPROC: + s = "R" + case SSLEEP: + s = "S" + case SSTOP: + s = "T" + case SDEAD: + s = "Z" + } + + return s, nil +} +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + ps, err := exec.LookPath("ps") + if err != nil { + return false, err + } + out, err := invoke.CommandWithContext(ctx, ps, "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + uids := 
make([]int32, 0, 3) + + uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + + return uids, nil +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]int32, 0, 3) + gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + + return gids, nil +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Tdev) + + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil +} +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Nice), nil +} +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + var rlimit []RlimitStat + return rlimit, common.ErrNotImplementedError +} +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &IOCountersStat{ + ReadCount: uint64(k.Uru_inblock), + WriteCount: uint64(k.Uru_oublock), + }, nil +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + /* not supported, just return 1 */ + return 1, nil +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + return ret, common.ErrNotImplementedError +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, 
error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &cpu.TimesStat{ + CPU: "cpu", + User: float64(k.Uutime_sec) + float64(k.Uutime_usec)/1000000, + System: float64(k.Ustime_sec) + float64(k.Ustime_usec)/1000000, + }, nil +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + pageSize, err := mem.GetPageSize() + if err != nil { + return nil, err + } + + return &MemoryInfoStat{ + RSS: uint64(k.Vm_rssize) * pageSize, + VMS: uint64(k.Vm_tsize) + uint64(k.Vm_dsize) + + uint64(k.Vm_ssize), + }, nil +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcess(pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IsRunning() (bool, error) { + return p.IsRunningWithContext(context.Background()) +} + +func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { + return true, common.ErrNotImplementedError +} +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + var ret []MemoryMapsStat + return &ret, common.ErrNotImplementedError +} + +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, 
error) { + results := []*Process{} + + buf, length, err := CallKernProcSyscall(KernProcAll, 0) + + if err != nil { + return results, err + } + + // get kinfo_proc size + count := int(length / uint64(sizeOfKinfoProc)) + + // parse buf to procs + for i := 0; i < count; i++ { + b := buf[i*sizeOfKinfoProc : (i+1)*sizeOfKinfoProc] + k, err := parseKinfoProc(b) + if err != nil { + continue + } + p, err := NewProcess(int32(k.Pid)) + if err != nil { + continue + } + + results = append(results, p) + } + + return results, nil +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + err := common.Read(br, binary.LittleEndian, &k) + return k, err +} + +func (p *Process) getKProc() (*KinfoProc, error) { + return p.getKProcWithContext(context.Background()) +} + +func (p *Process) getKProcWithContext(ctx context.Context) (*KinfoProc, error) { + buf, length, err := CallKernProcSyscall(KernProcPID, p.Pid) + if err != nil { + return nil, err + } + if length != sizeOfKinfoProc { + return nil, err + } + + k, err := parseKinfoProc(buf) + if err != nil { + return nil, err + } + return &k, nil +} + +func NewProcess(pid int32) (*Process, error) { + p := &Process{Pid: pid} + + return p, nil +} + +func CallKernProcSyscall(op int32, arg int32) ([]byte, uint64, error) { + return CallKernProcSyscallWithContext(context.Background(), op, arg) +} + +func CallKernProcSyscallWithContext(ctx context.Context, op int32, arg int32) ([]byte, uint64, error) { + mib := []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, 0} + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return nil, length, err + } + + count := int32(length / uint64(sizeOfKinfoProc)) + mib = []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, count} + mibptr = unsafe.Pointer(&mib[0]) + miblen = uint64(len(mib)) + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/process/process_openbsd_amd64.go new file mode 100644 index 0000000..8607422 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_openbsd_amd64.go @@ -0,0 +1,200 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x50 + sizeOfKinfoProc = 0x268 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + 
Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 + Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Acflag uint16 + Comm [24]int8 + Wmesg [8]int8 + Wchan uint64 + Login [32]int8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Pad_cgo_0 [4]byte + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags int32 + Spare int32 + Svuid uint32 + Svgid uint32 + Emul [8]int8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint64 + End uint64 + Guard uint64 + Fspace uint64 + Fspace_augment uint64 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + Max_protection int32 + Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [7]byte +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_posix.go b/vendor/github.com/shirou/gopsutil/process/process_posix.go new file mode 100644 index 0000000..8ffb6b7 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_posix.go @@ -0,0 +1,146 @@ +// +build linux freebsd openbsd darwin + +package process + +import ( + "context" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "syscall" + + "golang.org/x/sys/unix" +) + +// POSIX +func getTerminalMap() (map[uint64]string, error) { + ret := make(map[uint64]string) + var termfiles []string + + d, err := os.Open("/dev") + if err != nil { + return nil, err + } + defer d.Close() + + devnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, devname := range devnames { + if strings.HasPrefix(devname, "/dev/tty") { + termfiles = append(termfiles, "/dev/tty/"+devname) + } + } + + var ptsnames []string + ptsd, err := os.Open("/dev/pts") + if err != nil { + ptsnames, _ = filepath.Glob("/dev/ttyp*") + if ptsnames == nil { + return nil, err + } + } + defer ptsd.Close() + + if ptsnames == nil { + defer ptsd.Close() + ptsnames, err = ptsd.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, ptsname := range ptsnames { + termfiles = append(termfiles, "/dev/pts/"+ptsname) + } + } else { + termfiles = ptsnames + } + + for _, name := range termfiles { + stat := unix.Stat_t{} + if err = unix.Stat(name, &stat); err != nil { + return nil, err + } + rdev := 
uint64(stat.Rdev) + ret[rdev] = strings.Replace(name, "/dev", "", -1) + } + return ret, nil +} + +// SendSignal sends a unix.Signal to the process. +// Currently, SIGSTOP, SIGCONT, SIGTERM and SIGKILL are supported. +func (p *Process) SendSignal(sig syscall.Signal) error { + return p.SendSignalWithContext(context.Background(), sig) +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { + process, err := os.FindProcess(int(p.Pid)) + if err != nil { + return err + } + + err = process.Signal(sig) + if err != nil { + return err + } + + return nil +} + +// Suspend sends SIGSTOP to the process. +func (p *Process) Suspend() error { + return p.SuspendWithContext(context.Background()) +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return p.SendSignal(unix.SIGSTOP) +} + +// Resume sends SIGCONT to the process. +func (p *Process) Resume() error { + return p.ResumeWithContext(context.Background()) +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return p.SendSignal(unix.SIGCONT) +} + +// Terminate sends SIGTERM to the process. +func (p *Process) Terminate() error { + return p.TerminateWithContext(context.Background()) +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + return p.SendSignal(unix.SIGTERM) +} + +// Kill sends SIGKILL to the process. +func (p *Process) Kill() error { + return p.KillWithContext(context.Background()) +} + +func (p *Process) KillWithContext(ctx context.Context) error { + return p.SendSignal(unix.SIGKILL) +} + +// Username returns a username of the process. +func (p *Process) Username() (string, error) { + return p.UsernameWithContext(context.Background()) +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + uids, err := p.Uids() + if err != nil { + return "", err + } + if len(uids) > 0 { + u, err := user.LookupId(strconv.Itoa(int(uids[0]))) + if err != nil { + return "", err + } + return u.Username, nil + } + return "", nil +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_windows.go b/vendor/github.com/shirou/gopsutil/process/process_windows.go new file mode 100644 index 0000000..52bec7b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_windows.go @@ -0,0 +1,809 @@ +// +build windows + +package process + +import ( + "context" + "fmt" + "os" + "strings" + "syscall" + "time" + "unsafe" + + "github.com/StackExchange/wmi" + cpu "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/internal/common" + net "github.com/shirou/gopsutil/net" + "github.com/shirou/w32" + "golang.org/x/sys/windows" +) + +const ( + NoMoreFiles = 0x12 + MaxPathLength = 260 +) + +var ( + modpsapi = windows.NewLazySystemDLL("psapi.dll") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + + advapi32 = windows.NewLazySystemDLL("advapi32.dll") + procLookupPrivilegeValue = advapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = advapi32.NewProc("AdjustTokenPrivileges") +) + +type SystemProcessInformation struct { + NextEntryOffset uint64 + NumberOfThreads uint64 + Reserved1 [48]byte + Reserved2 [3]byte + UniqueProcessID uintptr + Reserved3 uintptr + HandleCount uint64 + Reserved4 [4]byte + Reserved5 [11]byte + PeakPagefileUsage uint64 + PrivatePageCount uint64 + Reserved6 [6]uint64 +} + +// Memory_info_ex is different between OSes +type MemoryInfoExStat struct { +} + +type MemoryMapsStat struct { +} + +type Win32_Process struct { + Name string + ExecutablePath *string + CommandLine 
*string + Priority uint32 + CreationDate *time.Time + ProcessID uint32 + ThreadCount uint32 + Status *string + ReadOperationCount uint64 + ReadTransferCount uint64 + WriteOperationCount uint64 + WriteTransferCount uint64 + CSCreationClassName string + CSName string + Caption *string + CreationClassName string + Description *string + ExecutionState *uint16 + HandleCount uint32 + KernelModeTime uint64 + MaximumWorkingSetSize *uint32 + MinimumWorkingSetSize *uint32 + OSCreationClassName string + OSName string + OtherOperationCount uint64 + OtherTransferCount uint64 + PageFaults uint32 + PageFileUsage uint32 + ParentProcessID uint32 + PeakPageFileUsage uint32 + PeakVirtualSize uint64 + PeakWorkingSetSize uint32 + PrivatePageCount uint64 + TerminationDate *time.Time + UserModeTime uint64 + WorkingSetSize uint64 +} + +type winLUID struct { + LowPart winDWord + HighPart winLong +} + +// LUID_AND_ATTRIBUTES +type winLUIDAndAttributes struct { + Luid winLUID + Attributes winDWord +} + +// TOKEN_PRIVILEGES +type winTokenPriviledges struct { + PrivilegeCount winDWord + Privileges [1]winLUIDAndAttributes +} + +type winLong int32 +type winDWord uint32 + +func init() { + wmi.DefaultClient.AllowMissingFields = true + + // enable SeDebugPrivilege https://github.com/midstar/proci/blob/6ec79f57b90ba3d9efa2a7b16ef9c9369d4be875/proci_windows.go#L80-L119 + handle, err := syscall.GetCurrentProcess() + if err != nil { + return + } + + var token syscall.Token + err = syscall.OpenProcessToken(handle, 0x0028, &token) + if err != nil { + return + } + defer token.Close() + + tokenPriviledges := winTokenPriviledges{PrivilegeCount: 1} + lpName := syscall.StringToUTF16("SeDebugPrivilege") + ret, _, _ := procLookupPrivilegeValue.Call( + 0, + uintptr(unsafe.Pointer(&lpName[0])), + uintptr(unsafe.Pointer(&tokenPriviledges.Privileges[0].Luid))) + if ret == 0 { + return + } + + tokenPriviledges.Privileges[0].Attributes = 0x00000002 // SE_PRIVILEGE_ENABLED + + procAdjustTokenPrivileges.Call( + uintptr(token), + 0, + uintptr(unsafe.Pointer(&tokenPriviledges)), + uintptr(unsafe.Sizeof(tokenPriviledges)), + 0, + 0) +} + +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + // inspired by https://gist.github.com/henkman/3083408 + // and https://github.com/giampaolo/psutil/blob/1c3a15f637521ba5c0031283da39c733fda53e4c/psutil/arch/windows/process_info.c#L315-L329 + var ret []int32 + var read uint32 = 0 + var psSize uint32 = 1024 + const dwordSize uint32 = 4 + + for { + ps := make([]uint32, psSize) + if !w32.EnumProcesses(ps, uint32(len(ps)), &read) { + return nil, fmt.Errorf("could not get w32.EnumProcesses") + } + if uint32(len(ps)) == read { // ps buffer was too small to host every results, retry with a bigger one + psSize += 1024 + continue + } + for _, pid := range ps[:read/dwordSize] { + ret = append(ret, int32(pid)) + } + return ret, nil + + } + +} + +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + ppid, _, _, err := getFromSnapProcess(p.Pid) + if err != nil { + return 0, err + } + return ppid, nil +} + +func GetWin32Proc(pid int32) ([]Win32_Process, error) { + return GetWin32ProcWithContext(context.Background(), pid) +} + +func GetWin32ProcWithContext(ctx context.Context, pid int32) ([]Win32_Process, error) { + var dst []Win32_Process + query := fmt.Sprintf("WHERE ProcessId = %d", pid) + q := 
wmi.CreateQuery(&dst, query) + err := common.WMIQueryWithContext(ctx, q, &dst) + if err != nil { + return []Win32_Process{}, fmt.Errorf("could not get win32Proc: %s", err) + } + + if len(dst) == 0 { + return []Win32_Process{}, fmt.Errorf("could not get win32Proc: empty") + } + + return dst, nil +} + +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + _, _, name, err := getFromSnapProcess(p.Pid) + if err != nil { + return "", fmt.Errorf("could not get Name: %s", err) + } + return name, nil +} + +func (p *Process) Tgid() (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + if p.Pid != 0 { // 0 or null is the current process for CreateToolhelp32Snapshot + snap := w32.CreateToolhelp32Snapshot(w32.TH32CS_SNAPMODULE|w32.TH32CS_SNAPMODULE32, uint32(p.Pid)) + if snap != 0 { // don't report errors here, fallback to WMI instead + defer w32.CloseHandle(snap) + var me32 w32.MODULEENTRY32 + me32.Size = uint32(unsafe.Sizeof(me32)) + + if w32.Module32First(snap, &me32) { + szexepath := windows.UTF16ToString(me32.SzExePath[:]) + return szexepath, nil + } + } + } + dst, err := GetWin32ProcWithContext(ctx, p.Pid) + if err != nil { + return "", fmt.Errorf("could not get ExecutablePath: %s", err) + } + return *dst[0].ExecutablePath, nil +} + +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + dst, err := GetWin32ProcWithContext(ctx, p.Pid) + if err != nil { + return "", fmt.Errorf("could not get CommandLine: %s", err) + } + return *dst[0].CommandLine, nil +} + +// CmdlineSlice returns the command line arguments of the process as a slice with each +// element being an argument. This merely returns the CommandLine informations passed +// to the process split on the 0x20 ASCII character. 
+func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + cmdline, err := p.CmdlineWithContext(ctx) + if err != nil { + return nil, err + } + return strings.Split(cmdline, " "), nil +} + +func (p *Process) CreateTime() (int64, error) { + return p.CreateTimeWithContext(context.Background()) +} + +func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) { + ru, err := getRusage(p.Pid) + if err != nil { + return 0, fmt.Errorf("could not get CreationDate: %s", err) + } + + return ru.CreationTime.Nanoseconds() / 1000000, nil +} + +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + ppid, err := p.PpidWithContext(ctx) + if err != nil { + return nil, fmt.Errorf("could not get ParentProcessID: %s", err) + } + + return NewProcess(ppid) +} +func (p *Process) Status() (string, error) { + return p.StatusWithContext(context.Background()) +} + +func (p *Process) StatusWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) Username() (string, error) { + return p.UsernameWithContext(context.Background()) +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + pid := p.Pid + // 0x1000 is PROCESS_QUERY_LIMITED_INFORMATION + c, err := syscall.OpenProcess(0x1000, false, uint32(pid)) + if err != nil { + return "", err + } + defer syscall.CloseHandle(c) + + var token syscall.Token + err = syscall.OpenProcessToken(c, syscall.TOKEN_QUERY, &token) + if err != nil { + return "", err + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "", err + } + + user, domain, _, err := tokenUser.User.Sid.LookupAccount("") + return domain + "\\" + user, err +} + +func (p *Process) Uids() ([]int32, error) { + return p.UidsWithContext(context.Background()) +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { + var uids []int32 + + return uids, common.ErrNotImplementedError +} +func (p *Process) Gids() ([]int32, error) { + return p.GidsWithContext(context.Background()) +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { + var gids []int32 + return gids, common.ErrNotImplementedError +} +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +// Nice returns priority in Windows +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + dst, err := GetWin32ProcWithContext(ctx, p.Pid) + if err != nil { + return 0, fmt.Errorf("could not get Priority: %s", err) + } + return int32(dst[0].Priority), nil +} +func (p *Process) IOnice() 
(int32, error) { + return p.IOniceWithContext(context.Background()) +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + var rlimit []RlimitStat + + return rlimit, common.ErrNotImplementedError +} +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + var rlimit []RlimitStat + + return rlimit, common.ErrNotImplementedError +} + +func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + dst, err := GetWin32ProcWithContext(ctx, p.Pid) + if err != nil || len(dst) == 0 { + return nil, fmt.Errorf("could not get Win32Proc: %s", err) + } + ret := &IOCountersStat{ + ReadCount: uint64(dst[0].ReadOperationCount), + ReadBytes: uint64(dst[0].ReadTransferCount), + WriteCount: uint64(dst[0].WriteOperationCount), + WriteBytes: uint64(dst[0].WriteTransferCount), + } + + return ret, nil +} +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + dst, err := GetWin32ProcWithContext(ctx, p.Pid) + if err != nil { + return 0, fmt.Errorf("could not get ThreadCount: %s", err) + } + return int32(dst[0].ThreadCount), nil +} +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + return ret, common.ErrNotImplementedError +} +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + sysTimes, err := getProcessCPUTimes(p.Pid) + if err != nil { + return nil, err + } + + // User and kernel times are represented as a FILETIME structure + // which contains a 64-bit value representing the number of + // 100-nanosecond intervals since January 1, 1601 (UTC): + // http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx + // To convert it into a float representing the seconds that the + // process has executed in user/kernel mode I borrowed the code + // below from psutil's _psutil_windows.c, and in turn from Python's + // Modules/posixmodule.c + + user := float64(sysTimes.UserTime.HighDateTime)*429.4967296 + float64(sysTimes.UserTime.LowDateTime)*1e-7 + kernel := float64(sysTimes.KernelTime.HighDateTime)*429.4967296 + float64(sysTimes.KernelTime.LowDateTime)*1e-7 + + return 
&cpu.TimesStat{ + User: user, + System: kernel, + }, nil +} +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + mem, err := getMemoryInfo(p.Pid) + if err != nil { + return nil, err + } + + ret := &MemoryInfoStat{ + RSS: uint64(mem.WorkingSetSize), + VMS: uint64(mem.PagefileUsage), + } + + return ret, nil +} +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + snap := w32.CreateToolhelp32Snapshot(w32.TH32CS_SNAPPROCESS, uint32(0)) + if snap == 0 { + return out, windows.GetLastError() + } + defer w32.CloseHandle(snap) + var pe32 w32.PROCESSENTRY32 + pe32.DwSize = uint32(unsafe.Sizeof(pe32)) + if w32.Process32First(snap, &pe32) == false { + return out, windows.GetLastError() + } + + if pe32.Th32ParentProcessID == uint32(p.Pid) { + p, err := NewProcess(int32(pe32.Th32ProcessID)) + if err == nil { + out = append(out, p) + } + } + + for w32.Process32Next(snap, &pe32) { + if pe32.Th32ParentProcessID == uint32(p.Pid) { + p, err := NewProcess(int32(pe32.Th32ProcessID)) + if err == nil { + out = append(out, p) + } + } + } + return out, nil +} + +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) { + return p.NetIOCountersWithContext(context.Background(), pernic) +} + +func (p *Process) NetIOCountersWithContext(ctx context.Context, pernic bool) ([]net.IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IsRunning() (bool, error) { + return p.IsRunningWithContext(context.Background()) +} + +func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { + return true, common.ErrNotImplementedError +} + +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + var ret []MemoryMapsStat + return &ret, common.ErrNotImplementedError +} + +func NewProcess(pid int32) 
(*Process, error) { + p := &Process{Pid: pid} + + return p, nil +} + +func (p *Process) SendSignal(sig windows.Signal) error { + return p.SendSignalWithContext(context.Background(), sig) +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig windows.Signal) error { + return common.ErrNotImplementedError +} + +func (p *Process) Suspend() error { + return p.SuspendWithContext(context.Background()) +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} +func (p *Process) Resume() error { + return p.ResumeWithContext(context.Background()) +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) Terminate() error { + return p.TerminateWithContext(context.Background()) +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + // PROCESS_TERMINATE = 0x0001 + proc := w32.OpenProcess(0x0001, false, uint32(p.Pid)) + ret := w32.TerminateProcess(proc, 0) + w32.CloseHandle(proc) + + if ret == false { + return windows.GetLastError() + } else { + return nil + } +} + +func (p *Process) Kill() error { + return p.KillWithContext(context.Background()) +} + +func (p *Process) KillWithContext(ctx context.Context) error { + process := os.Process{Pid: int(p.Pid)} + return process.Kill() +} + +func getFromSnapProcess(pid int32) (int32, int32, string, error) { + snap := w32.CreateToolhelp32Snapshot(w32.TH32CS_SNAPPROCESS, uint32(pid)) + if snap == 0 { + return 0, 0, "", windows.GetLastError() + } + defer w32.CloseHandle(snap) + var pe32 w32.PROCESSENTRY32 + pe32.DwSize = uint32(unsafe.Sizeof(pe32)) + if w32.Process32First(snap, &pe32) == false { + return 0, 0, "", windows.GetLastError() + } + + if pe32.Th32ProcessID == uint32(pid) { + szexe := windows.UTF16ToString(pe32.SzExeFile[:]) + return int32(pe32.Th32ParentProcessID), int32(pe32.CntThreads), szexe, nil + } + + for w32.Process32Next(snap, &pe32) { + if pe32.Th32ProcessID == uint32(pid) { + szexe := windows.UTF16ToString(pe32.SzExeFile[:]) + return int32(pe32.Th32ParentProcessID), int32(pe32.CntThreads), szexe, nil + } + } + return 0, 0, "", fmt.Errorf("Couldn't find pid: %d", pid) +} + +// Get processes +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, fmt.Errorf("could not get Processes %s", err) + } + + for _, pid := range pids { + p, err := NewProcess(pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func getProcInfo(pid int32) (*SystemProcessInformation, error) { + initialBufferSize := uint64(0x4000) + bufferSize := initialBufferSize + buffer := make([]byte, bufferSize) + + var sysProcInfo SystemProcessInformation + ret, _, _ := common.ProcNtQuerySystemInformation.Call( + uintptr(unsafe.Pointer(&sysProcInfo)), + uintptr(unsafe.Pointer(&buffer[0])), + uintptr(unsafe.Pointer(&bufferSize)), + uintptr(unsafe.Pointer(&bufferSize))) + if ret != 0 { + return nil, windows.GetLastError() + } + + return &sysProcInfo, nil +} + +func getRusage(pid int32) (*windows.Rusage, error) { + var CPU windows.Rusage + + c, err := windows.OpenProcess(windows.PROCESS_QUERY_INFORMATION, false, uint32(pid)) + if err != nil { + return nil, err + } + defer windows.CloseHandle(c) + + if err := windows.GetProcessTimes(c, &CPU.CreationTime, &CPU.ExitTime, &CPU.KernelTime, 
&CPU.UserTime); err != nil { + return nil, err + } + + return &CPU, nil +} + +func getMemoryInfo(pid int32) (PROCESS_MEMORY_COUNTERS, error) { + var mem PROCESS_MEMORY_COUNTERS + // PROCESS_QUERY_LIMITED_INFORMATION is 0x1000 + c, err := windows.OpenProcess(0x1000, false, uint32(pid)) + if err != nil { + return mem, err + } + defer windows.CloseHandle(c) + if err := getProcessMemoryInfo(c, &mem); err != nil { + return mem, err + } + + return mem, err +} + +func getProcessMemoryInfo(h windows.Handle, mem *PROCESS_MEMORY_COUNTERS) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(h), uintptr(unsafe.Pointer(mem)), uintptr(unsafe.Sizeof(*mem))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +type SYSTEM_TIMES struct { + CreateTime syscall.Filetime + ExitTime syscall.Filetime + KernelTime syscall.Filetime + UserTime syscall.Filetime +} + +func getProcessCPUTimes(pid int32) (SYSTEM_TIMES, error) { + var times SYSTEM_TIMES + + // PROCESS_QUERY_LIMITED_INFORMATION is 0x1000 + h, err := windows.OpenProcess(0x1000, false, uint32(pid)) + if err != nil { + return times, err + } + defer windows.CloseHandle(h) + + err = syscall.GetProcessTimes( + syscall.Handle(h), + &times.CreateTime, + &times.ExitTime, + &times.KernelTime, + &times.UserTime, + ) + + return times, err +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_windows_386.go b/vendor/github.com/shirou/gopsutil/process/process_windows_386.go new file mode 100644 index 0000000..68f3153 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_windows_386.go @@ -0,0 +1,16 @@ +// +build windows + +package process + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint32 + WorkingSetSize uint32 + QuotaPeakPagedPoolUsage uint32 + QuotaPagedPoolUsage uint32 + QuotaPeakNonPagedPoolUsage uint32 + QuotaNonPagedPoolUsage uint32 + PagefileUsage uint32 + PeakPagefileUsage uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/process/process_windows_amd64.go b/vendor/github.com/shirou/gopsutil/process/process_windows_amd64.go new file mode 100644 index 0000000..df286df --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/process_windows_amd64.go @@ -0,0 +1,16 @@ +// +build windows + +package process + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/process/types_darwin.go b/vendor/github.com/shirou/gopsutil/process/types_darwin.go new file mode 100644 index 0000000..21216cd --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/types_darwin.go @@ -0,0 +1,160 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Hand Writing +// - all pointer in ExternProc to uint64 + +// +build ignore + +/* +Input to cgo -godefs. 
+*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ +// +godefs map struct_ [16]byte /* in6_addr */ + +package process + +/* +#define __DARWIN_UNIX03 0 +#define KERNEL +#define _DARWIN_USE_64_BIT_INODE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +struct ucred_queue { + struct ucred *tqe_next; + struct ucred **tqe_prev; + TRACEBUF +}; + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type UGid_t C.gid_t + +type KinfoProc C.struct_kinfo_proc + +type Eproc C.struct_eproc + +type Proc C.struct_proc + +type Session C.struct_session + +type ucred C.struct_ucred + +type Uucred C.struct__ucred + +type Upcred C.struct__pcred + +type Vmspace C.struct_vmspace + +type Sigacts C.struct_sigacts + +type ExternProc C.struct_extern_proc + +type Itimerval C.struct_itimerval + +type Vnode C.struct_vnode + +type Pgrp C.struct_pgrp + +type UserStruct C.struct_user + +type Au_session C.struct_au_session + +type Posix_cred C.struct_posix_cred + +type Label C.struct_label + +type AuditinfoAddr C.struct_auditinfo_addr +type AuMask C.struct_au_mask +type AuTidAddr C.struct_au_tid_addr + +// TAILQ(ucred) +type UcredQueue C.struct_ucred_queue diff --git a/vendor/github.com/shirou/gopsutil/process/types_freebsd.go b/vendor/github.com/shirou/gopsutil/process/types_freebsd.go new file mode 100644 index 0000000..aa7b346 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/types_freebsd.go @@ -0,0 +1,95 @@ +// +build ignore + +// We still need editing by hands. +// go tool cgo -godefs types_freebsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_freebsd_amd64.go + +/* +Input to cgo -godefs. +*/ + +// +godefs map struct_pargs int64 /* pargs */ +// +godefs map struct_proc int64 /* proc */ +// +godefs map struct_user int64 /* user */ +// +godefs map struct_vnode int64 /* vnode */ +// +godefs map struct_vnode int64 /* vnode */ +// +godefs map struct_filedesc int64 /* filedesc */ +// +godefs map struct_vmspace int64 /* vmspace */ +// +godefs map struct_pcb int64 /* pcb */ +// +godefs map struct_thread int64 /* thread */ +// +godefs map struct___sigset [16]byte /* sigset */ + +package process + +/* +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + + +*/ +import "C" + +// Machine characteristics; for internal use. 
+ +const ( + CTLKern = 1 // "high kernel": proc, limits + KernProc = 14 // struct: process entries + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcPathname = 12 // path to executable + KernProcArgs = 7 // get/set arguments/proctitle +) + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +const ( + sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry + sizeOfKinfoProc = C.sizeof_struct_kinfo_proc +) + +// from sys/proc.h +const ( + SIDL = 1 /* Process being created by fork. */ + SRUN = 2 /* Currently runnable. */ + SSLEEP = 3 /* Sleeping on an address. */ + SSTOP = 4 /* Process debugging or suspension. */ + SZOMB = 5 /* Awaiting collection by parent. */ + SWAIT = 6 /* Waiting for interrupt. */ + SLOCK = 7 /* Blocked on a lock. */ +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type KinfoProc C.struct_kinfo_proc + +type Priority C.struct_priority + +type KinfoVmentry C.struct_kinfo_vmentry diff --git a/vendor/github.com/shirou/gopsutil/process/types_openbsd.go b/vendor/github.com/shirou/gopsutil/process/types_openbsd.go new file mode 100644 index 0000000..09ac590 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/process/types_openbsd.go @@ -0,0 +1,103 @@ +// +build ignore + +// We still need editing by hands. +// go tool cgo -godefs types_openbsd.go | sed 's/\*int64/int64/' | sed 's/\*byte/int64/' > process_openbsd_amd64.go + +/* +Input to cgo -godefs. +*/ + +// +godefs map struct_pargs int64 /* pargs */ +// +godefs map struct_proc int64 /* proc */ +// +godefs map struct_user int64 /* user */ +// +godefs map struct_vnode int64 /* vnode */ +// +godefs map struct_vnode int64 /* vnode */ +// +godefs map struct_filedesc int64 /* filedesc */ +// +godefs map struct_vmspace int64 /* vmspace */ +// +godefs map struct_pcb int64 /* pcb */ +// +godefs map struct_thread int64 /* thread */ +// +godefs map struct___sigset [16]byte /* sigset */ + +package process + +/* +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + + +*/ +import "C" + +// Machine characteristics; for internal use. + +const ( + CTLKern = 1 // "high kernel": proc, limits + KernProc = 66 // struct: process entries + KernProcAll = 0 + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcPathname = 12 // path to executable + KernProcArgs = 55 // get/set arguments/proctitle + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 // sys/syslimits.h:#define ARG_MAX +) + +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong +) + +const ( + sizeOfKinfoVmentry = C.sizeof_struct_kinfo_vmentry + sizeOfKinfoProc = C.sizeof_struct_kinfo_proc +) + +// from sys/proc.h +const ( + SIDL = 1 /* Process being created by fork. */ + SRUN = 2 /* Currently runnable. */ + SSLEEP = 3 /* Sleeping on an address. */ + SSTOP = 4 /* Process debugging or suspension. */ + SZOMB = 5 /* Awaiting collection by parent. */ + SDEAD = 6 /* Thread is almost gone */ + SONPROC = 7 /* Thread is currently on a CPU. 
*/ +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type KinfoProc C.struct_kinfo_proc + +type Priority C.struct_priority + +type KinfoVmentry C.struct_kinfo_vmentry diff --git a/vendor/github.com/shirou/w32/AUTHORS b/vendor/github.com/shirou/w32/AUTHORS new file mode 100644 index 0000000..c0785e8 --- /dev/null +++ b/vendor/github.com/shirou/w32/AUTHORS @@ -0,0 +1,16 @@ +# This is the official list of 'w32' authors for copyright purposes. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +# Contributors +# ============ + +Allen Dang +Benny Siegert +Bruno Bigras +Gerald Rosenberg +Michael Henke \ No newline at end of file diff --git a/vendor/github.com/shirou/w32/LICENSE b/vendor/github.com/shirou/w32/LICENSE new file mode 100644 index 0000000..9f36608 --- /dev/null +++ b/vendor/github.com/shirou/w32/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2010-2012 The w32 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The names of the authors may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/shirou/w32/README.md b/vendor/github.com/shirou/w32/README.md new file mode 100644 index 0000000..ed196e7 --- /dev/null +++ b/vendor/github.com/shirou/w32/README.md @@ -0,0 +1,33 @@ +About w32 +========== + +w32 is a wrapper of windows apis for the Go Programming Language. + +It wraps win32 apis to "Go style" to make them easier to use. + +Setup +===== + +1. Make sure you have a working Go installation and build environment, + see this go-nuts post for details: + http://groups.google.com/group/golang-nuts/msg/5c87630a84f4fd0c + + Updated versions of the Windows Go build are available here: + http://code.google.com/p/gomingw/downloads/list + +2. Create a "gopath" directory if you do not have one yet and set the + GOPATH variable accordingly. For example: + mkdir -p go-externals/src + export GOPATH=${PWD}/go-externals + +3. go get github.com/AllenDang/w32 + +4. go install github.com/AllenDang/w32... 
+ +Contribute +========== + +Contributions in form of design, code, documentation, bug reporting or other +ways you see fit are very welcome. + +Thank You! diff --git a/vendor/github.com/shirou/w32/advapi32.go b/vendor/github.com/shirou/w32/advapi32.go new file mode 100644 index 0000000..35fd35a --- /dev/null +++ b/vendor/github.com/shirou/w32/advapi32.go @@ -0,0 +1,301 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "errors" + "fmt" + "syscall" + "unsafe" +) + +var ( + modadvapi32 = syscall.NewLazyDLL("advapi32.dll") + + procRegCreateKeyEx = modadvapi32.NewProc("RegCreateKeyExW") + procRegOpenKeyEx = modadvapi32.NewProc("RegOpenKeyExW") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegGetValue = modadvapi32.NewProc("RegGetValueW") + procRegEnumKeyEx = modadvapi32.NewProc("RegEnumKeyExW") + // procRegSetKeyValue = modadvapi32.NewProc("RegSetKeyValueW") + procRegSetValueEx = modadvapi32.NewProc("RegSetValueExW") + procOpenEventLog = modadvapi32.NewProc("OpenEventLogW") + procReadEventLog = modadvapi32.NewProc("ReadEventLogW") + procCloseEventLog = modadvapi32.NewProc("CloseEventLog") + procOpenSCManager = modadvapi32.NewProc("OpenSCManagerW") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procOpenService = modadvapi32.NewProc("OpenServiceW") + procStartService = modadvapi32.NewProc("StartServiceW") + procControlService = modadvapi32.NewProc("ControlService") +) + +func RegCreateKey(hKey HKEY, subKey string) HKEY { + var result HKEY + ret, _, _ := procRegCreateKeyEx.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(0), + uintptr(0), + uintptr(0), + uintptr(KEY_ALL_ACCESS), + uintptr(0), + uintptr(unsafe.Pointer(&result)), + uintptr(0)) + _ = ret + return result +} + +func RegOpenKeyEx(hKey HKEY, subKey string, samDesired uint32) HKEY { + var result HKEY + ret, _, _ := procRegOpenKeyEx.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(0), + uintptr(samDesired), + uintptr(unsafe.Pointer(&result))) + + if ret != ERROR_SUCCESS { + panic(fmt.Sprintf("RegOpenKeyEx(%d, %s, %d) failed", hKey, subKey, samDesired)) + } + return result +} + +func RegCloseKey(hKey HKEY) error { + var err error + ret, _, _ := procRegCloseKey.Call( + uintptr(hKey)) + + if ret != ERROR_SUCCESS { + err = errors.New("RegCloseKey failed") + } + return err +} + +func RegGetRaw(hKey HKEY, subKey string, value string) []byte { + var bufLen uint32 + var valptr unsafe.Pointer + if len(value) > 0 { + valptr = unsafe.Pointer(syscall.StringToUTF16Ptr(value)) + } + procRegGetValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(valptr), + uintptr(RRF_RT_ANY), + 0, + 0, + uintptr(unsafe.Pointer(&bufLen))) + + if bufLen == 0 { + return nil + } + + buf := make([]byte, bufLen) + ret, _, _ := procRegGetValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(valptr), + uintptr(RRF_RT_ANY), + 0, + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&bufLen))) + + if ret != ERROR_SUCCESS { + return nil + } + + return buf +} + +func RegSetBinary(hKey HKEY, subKey string, value []byte) (errno int) { + var lptr, vptr unsafe.Pointer + if len(subKey) > 0 { + lptr = unsafe.Pointer(syscall.StringToUTF16Ptr(subKey)) + } + if len(value) > 0 { + vptr = 
unsafe.Pointer(&value[0]) + } + ret, _, _ := procRegSetValueEx.Call( + uintptr(hKey), + uintptr(lptr), + uintptr(0), + uintptr(REG_BINARY), + uintptr(vptr), + uintptr(len(value))) + + return int(ret) +} + +func RegGetString(hKey HKEY, subKey string, value string) string { + var bufLen uint32 + procRegGetValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(value))), + uintptr(RRF_RT_REG_SZ), + 0, + 0, + uintptr(unsafe.Pointer(&bufLen))) + + if bufLen == 0 { + return "" + } + + buf := make([]uint16, bufLen) + ret, _, _ := procRegGetValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(value))), + uintptr(RRF_RT_REG_SZ), + 0, + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&bufLen))) + + if ret != ERROR_SUCCESS { + return "" + } + + return syscall.UTF16ToString(buf) +} + +/* +func RegSetKeyValue(hKey HKEY, subKey string, valueName string, dwType uint32, data uintptr, cbData uint16) (errno int) { + ret, _, _ := procRegSetKeyValue.Call( + uintptr(hKey), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(subKey))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(valueName))), + uintptr(dwType), + data, + uintptr(cbData)) + + return int(ret) +} +*/ + +func RegEnumKeyEx(hKey HKEY, index uint32) string { + var bufLen uint32 = 255 + buf := make([]uint16, bufLen) + procRegEnumKeyEx.Call( + uintptr(hKey), + uintptr(index), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&bufLen)), + 0, + 0, + 0, + 0) + return syscall.UTF16ToString(buf) +} + +func OpenEventLog(servername string, sourcename string) HANDLE { + ret, _, _ := procOpenEventLog.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(servername))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(sourcename)))) + + return HANDLE(ret) +} + +func ReadEventLog(eventlog HANDLE, readflags, recordoffset uint32, buffer []byte, numberofbytestoread uint32, bytesread, minnumberofbytesneeded *uint32) bool { + ret, _, _ := procReadEventLog.Call( + uintptr(eventlog), + uintptr(readflags), + uintptr(recordoffset), + uintptr(unsafe.Pointer(&buffer[0])), + uintptr(numberofbytestoread), + uintptr(unsafe.Pointer(bytesread)), + uintptr(unsafe.Pointer(minnumberofbytesneeded))) + + return ret != 0 +} + +func CloseEventLog(eventlog HANDLE) bool { + ret, _, _ := procCloseEventLog.Call( + uintptr(eventlog)) + + return ret != 0 +} + +func OpenSCManager(lpMachineName, lpDatabaseName string, dwDesiredAccess uint32) (HANDLE, error) { + var p1, p2 uintptr + if len(lpMachineName) > 0 { + p1 = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpMachineName))) + } + if len(lpDatabaseName) > 0 { + p2 = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpDatabaseName))) + } + ret, _, _ := procOpenSCManager.Call( + p1, + p2, + uintptr(dwDesiredAccess)) + + if ret == 0 { + return 0, syscall.GetLastError() + } + + return HANDLE(ret), nil +} + +func CloseServiceHandle(hSCObject HANDLE) error { + ret, _, _ := procCloseServiceHandle.Call(uintptr(hSCObject)) + if ret == 0 { + return syscall.GetLastError() + } + return nil +} + +func OpenService(hSCManager HANDLE, lpServiceName string, dwDesiredAccess uint32) (HANDLE, error) { + ret, _, _ := procOpenService.Call( + uintptr(hSCManager), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpServiceName))), + uintptr(dwDesiredAccess)) + + if ret == 0 { + return 0, syscall.GetLastError() + } + + return HANDLE(ret), nil +} + +func 
StartService(hService HANDLE, lpServiceArgVectors []string) error { + l := len(lpServiceArgVectors) + var ret uintptr + if l == 0 { + ret, _, _ = procStartService.Call( + uintptr(hService), + 0, + 0) + } else { + lpArgs := make([]uintptr, l) + for i := 0; i < l; i++ { + lpArgs[i] = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpServiceArgVectors[i]))) + } + + ret, _, _ = procStartService.Call( + uintptr(hService), + uintptr(l), + uintptr(unsafe.Pointer(&lpArgs[0]))) + } + + if ret == 0 { + return syscall.GetLastError() + } + + return nil +} + +func ControlService(hService HANDLE, dwControl uint32, lpServiceStatus *SERVICE_STATUS) bool { + if lpServiceStatus == nil { + panic("ControlService:lpServiceStatus cannot be nil") + } + + ret, _, _ := procControlService.Call( + uintptr(hService), + uintptr(dwControl), + uintptr(unsafe.Pointer(lpServiceStatus))) + + return ret != 0 +} diff --git a/vendor/github.com/shirou/w32/comctl32.go b/vendor/github.com/shirou/w32/comctl32.go new file mode 100644 index 0000000..5139558 --- /dev/null +++ b/vendor/github.com/shirou/w32/comctl32.go @@ -0,0 +1,111 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modcomctl32 = syscall.NewLazyDLL("comctl32.dll") + + procInitCommonControlsEx = modcomctl32.NewProc("InitCommonControlsEx") + procImageList_Create = modcomctl32.NewProc("ImageList_Create") + procImageList_Destroy = modcomctl32.NewProc("ImageList_Destroy") + procImageList_GetImageCount = modcomctl32.NewProc("ImageList_GetImageCount") + procImageList_SetImageCount = modcomctl32.NewProc("ImageList_SetImageCount") + procImageList_Add = modcomctl32.NewProc("ImageList_Add") + procImageList_ReplaceIcon = modcomctl32.NewProc("ImageList_ReplaceIcon") + procImageList_Remove = modcomctl32.NewProc("ImageList_Remove") + procTrackMouseEvent = modcomctl32.NewProc("_TrackMouseEvent") +) + +func InitCommonControlsEx(lpInitCtrls *INITCOMMONCONTROLSEX) bool { + ret, _, _ := procInitCommonControlsEx.Call( + uintptr(unsafe.Pointer(lpInitCtrls))) + + return ret != 0 +} + +func ImageList_Create(cx, cy int, flags uint, cInitial, cGrow int) HIMAGELIST { + ret, _, _ := procImageList_Create.Call( + uintptr(cx), + uintptr(cy), + uintptr(flags), + uintptr(cInitial), + uintptr(cGrow)) + + if ret == 0 { + panic("Create image list failed") + } + + return HIMAGELIST(ret) +} + +func ImageList_Destroy(himl HIMAGELIST) bool { + ret, _, _ := procImageList_Destroy.Call( + uintptr(himl)) + + return ret != 0 +} + +func ImageList_GetImageCount(himl HIMAGELIST) int { + ret, _, _ := procImageList_GetImageCount.Call( + uintptr(himl)) + + return int(ret) +} + +func ImageList_SetImageCount(himl HIMAGELIST, uNewCount uint) bool { + ret, _, _ := procImageList_SetImageCount.Call( + uintptr(himl), + uintptr(uNewCount)) + + return ret != 0 +} + +func ImageList_Add(himl HIMAGELIST, hbmImage, hbmMask HBITMAP) int { + ret, _, _ := procImageList_Add.Call( + uintptr(himl), + uintptr(hbmImage), + uintptr(hbmMask)) + + return int(ret) +} + +func ImageList_ReplaceIcon(himl HIMAGELIST, i int, hicon HICON) int { + ret, _, _ := procImageList_ReplaceIcon.Call( + uintptr(himl), + uintptr(i), + uintptr(hicon)) + + return int(ret) +} + +func ImageList_AddIcon(himl HIMAGELIST, hicon HICON) int { + return ImageList_ReplaceIcon(himl, -1, hicon) +} + +func ImageList_Remove(himl HIMAGELIST, i int) bool { + ret, _, _ := 
procImageList_Remove.Call( + uintptr(himl), + uintptr(i)) + + return ret != 0 +} + +func ImageList_RemoveAll(himl HIMAGELIST) bool { + return ImageList_Remove(himl, -1) +} + +func TrackMouseEvent(tme *TRACKMOUSEEVENT) bool { + ret, _, _ := procTrackMouseEvent.Call( + uintptr(unsafe.Pointer(tme))) + + return ret != 0 +} diff --git a/vendor/github.com/shirou/w32/comdlg32.go b/vendor/github.com/shirou/w32/comdlg32.go new file mode 100644 index 0000000..ad9f776 --- /dev/null +++ b/vendor/github.com/shirou/w32/comdlg32.go @@ -0,0 +1,40 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modcomdlg32 = syscall.NewLazyDLL("comdlg32.dll") + + procGetSaveFileName = modcomdlg32.NewProc("GetSaveFileNameW") + procGetOpenFileName = modcomdlg32.NewProc("GetOpenFileNameW") + procCommDlgExtendedError = modcomdlg32.NewProc("CommDlgExtendedError") +) + +func GetOpenFileName(ofn *OPENFILENAME) bool { + ret, _, _ := procGetOpenFileName.Call( + uintptr(unsafe.Pointer(ofn))) + + return ret != 0 +} + +func GetSaveFileName(ofn *OPENFILENAME) bool { + ret, _, _ := procGetSaveFileName.Call( + uintptr(unsafe.Pointer(ofn))) + + return ret != 0 +} + +func CommDlgExtendedError() uint { + ret, _, _ := procCommDlgExtendedError.Call() + + return uint(ret) +} diff --git a/vendor/github.com/shirou/w32/constants.go b/vendor/github.com/shirou/w32/constants.go new file mode 100644 index 0000000..62d2d4b --- /dev/null +++ b/vendor/github.com/shirou/w32/constants.go @@ -0,0 +1,2661 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package w32 + +const ( + FALSE = 0 + TRUE = 1 +) + +const ( + NO_ERROR = 0 + ERROR_SUCCESS = 0 + ERROR_FILE_NOT_FOUND = 2 + ERROR_PATH_NOT_FOUND = 3 + ERROR_ACCESS_DENIED = 5 + ERROR_INVALID_HANDLE = 6 + ERROR_BAD_FORMAT = 11 + ERROR_INVALID_NAME = 123 + ERROR_MORE_DATA = 234 + ERROR_NO_MORE_ITEMS = 259 + ERROR_INVALID_SERVICE_CONTROL = 1052 + ERROR_SERVICE_REQUEST_TIMEOUT = 1053 + ERROR_SERVICE_NO_THREAD = 1054 + ERROR_SERVICE_DATABASE_LOCKED = 1055 + ERROR_SERVICE_ALREADY_RUNNING = 1056 + ERROR_SERVICE_DISABLED = 1058 + ERROR_SERVICE_DOES_NOT_EXIST = 1060 + ERROR_SERVICE_CANNOT_ACCEPT_CTRL = 1061 + ERROR_SERVICE_NOT_ACTIVE = 1062 + ERROR_DATABASE_DOES_NOT_EXIST = 1065 + ERROR_SERVICE_DEPENDENCY_FAIL = 1068 + ERROR_SERVICE_LOGON_FAILED = 1069 + ERROR_SERVICE_MARKED_FOR_DELETE = 1072 + ERROR_SERVICE_DEPENDENCY_DELETED = 1075 +) + +const ( + SE_ERR_FNF = 2 + SE_ERR_PNF = 3 + SE_ERR_ACCESSDENIED = 5 + SE_ERR_OOM = 8 + SE_ERR_DLLNOTFOUND = 32 + SE_ERR_SHARE = 26 + SE_ERR_ASSOCINCOMPLETE = 27 + SE_ERR_DDETIMEOUT = 28 + SE_ERR_DDEFAIL = 29 + SE_ERR_DDEBUSY = 30 + SE_ERR_NOASSOC = 31 +) + +const ( + CW_USEDEFAULT = ^0x7fffffff +) + +// ShowWindow constants +const ( + SW_HIDE = 0 + SW_NORMAL = 1 + SW_SHOWNORMAL = 1 + SW_SHOWMINIMIZED = 2 + SW_MAXIMIZE = 3 + SW_SHOWMAXIMIZED = 3 + SW_SHOWNOACTIVATE = 4 + SW_SHOW = 5 + SW_MINIMIZE = 6 + SW_SHOWMINNOACTIVE = 7 + SW_SHOWNA = 8 + SW_RESTORE = 9 + SW_SHOWDEFAULT = 10 + SW_FORCEMINIMIZE = 11 +) + +// Window class styles +const ( + CS_VREDRAW = 0x00000001 + CS_HREDRAW = 0x00000002 + CS_KEYCVTWINDOW = 0x00000004 + CS_DBLCLKS = 0x00000008 + CS_OWNDC = 0x00000020 + CS_CLASSDC = 0x00000040 + CS_PARENTDC = 0x00000080 + CS_NOKEYCVT = 0x00000100 + CS_NOCLOSE = 0x00000200 + CS_SAVEBITS = 0x00000800 + CS_BYTEALIGNCLIENT = 0x00001000 + CS_BYTEALIGNWINDOW = 0x00002000 + CS_GLOBALCLASS = 0x00004000 + CS_IME = 0x00010000 + CS_DROPSHADOW = 0x00020000 +) + +// Predefined cursor constants +const ( + IDC_ARROW = 32512 + IDC_IBEAM = 32513 + IDC_WAIT = 32514 + IDC_CROSS = 32515 + IDC_UPARROW = 32516 + IDC_SIZENWSE = 32642 + IDC_SIZENESW = 32643 + IDC_SIZEWE = 32644 + IDC_SIZENS = 32645 + IDC_SIZEALL = 32646 + IDC_NO = 32648 + IDC_HAND = 32649 + IDC_APPSTARTING = 32650 + IDC_HELP = 32651 + IDC_ICON = 32641 + IDC_SIZE = 32640 +) + +// Predefined icon constants +const ( + IDI_APPLICATION = 32512 + IDI_HAND = 32513 + IDI_QUESTION = 32514 + IDI_EXCLAMATION = 32515 + IDI_ASTERISK = 32516 + IDI_WINLOGO = 32517 + IDI_WARNING = IDI_EXCLAMATION + IDI_ERROR = IDI_HAND + IDI_INFORMATION = IDI_ASTERISK +) + +// Button style constants +const ( + BS_3STATE = 5 + BS_AUTO3STATE = 6 + BS_AUTOCHECKBOX = 3 + BS_AUTORADIOBUTTON = 9 + BS_BITMAP = 128 + BS_BOTTOM = 0X800 + BS_CENTER = 0X300 + BS_CHECKBOX = 2 + BS_DEFPUSHBUTTON = 1 + BS_GROUPBOX = 7 + BS_ICON = 64 + BS_LEFT = 256 + BS_LEFTTEXT = 32 + BS_MULTILINE = 0X2000 + BS_NOTIFY = 0X4000 + BS_OWNERDRAW = 0XB + BS_PUSHBUTTON = 0 + BS_PUSHLIKE = 4096 + BS_RADIOBUTTON = 4 + BS_RIGHT = 512 + BS_RIGHTBUTTON = 32 + BS_TEXT = 0 + BS_TOP = 0X400 + BS_USERBUTTON = 8 + BS_VCENTER = 0XC00 + BS_FLAT = 0X8000 +) + +// Button state constants +const ( + BST_CHECKED = 1 + BST_INDETERMINATE = 2 + BST_UNCHECKED = 0 + BST_FOCUS = 8 + BST_PUSHED = 4 +) + +// Predefined brushes constants +const ( + COLOR_3DDKSHADOW = 21 + COLOR_3DFACE = 15 + COLOR_3DHILIGHT = 20 + COLOR_3DHIGHLIGHT = 20 + COLOR_3DLIGHT = 22 + COLOR_BTNHILIGHT = 20 + COLOR_3DSHADOW = 16 + COLOR_ACTIVEBORDER = 10 + COLOR_ACTIVECAPTION = 2 + COLOR_APPWORKSPACE = 12 + COLOR_BACKGROUND = 1 + 
COLOR_DESKTOP = 1 + COLOR_BTNFACE = 15 + COLOR_BTNHIGHLIGHT = 20 + COLOR_BTNSHADOW = 16 + COLOR_BTNTEXT = 18 + COLOR_CAPTIONTEXT = 9 + COLOR_GRAYTEXT = 17 + COLOR_HIGHLIGHT = 13 + COLOR_HIGHLIGHTTEXT = 14 + COLOR_INACTIVEBORDER = 11 + COLOR_INACTIVECAPTION = 3 + COLOR_INACTIVECAPTIONTEXT = 19 + COLOR_INFOBK = 24 + COLOR_INFOTEXT = 23 + COLOR_MENU = 4 + COLOR_MENUTEXT = 7 + COLOR_SCROLLBAR = 0 + COLOR_WINDOW = 5 + COLOR_WINDOWFRAME = 6 + COLOR_WINDOWTEXT = 8 + COLOR_HOTLIGHT = 26 + COLOR_GRADIENTACTIVECAPTION = 27 + COLOR_GRADIENTINACTIVECAPTION = 28 +) + +// Button message constants +const ( + BM_CLICK = 245 + BM_GETCHECK = 240 + BM_GETIMAGE = 246 + BM_GETSTATE = 242 + BM_SETCHECK = 241 + BM_SETIMAGE = 247 + BM_SETSTATE = 243 + BM_SETSTYLE = 244 +) + +// Button notifications +const ( + BN_CLICKED = 0 + BN_PAINT = 1 + BN_HILITE = 2 + BN_PUSHED = BN_HILITE + BN_UNHILITE = 3 + BN_UNPUSHED = BN_UNHILITE + BN_DISABLE = 4 + BN_DOUBLECLICKED = 5 + BN_DBLCLK = BN_DOUBLECLICKED + BN_SETFOCUS = 6 + BN_KILLFOCUS = 7 +) + +// GetWindowLong and GetWindowLongPtr constants +const ( + GWL_EXSTYLE = -20 + GWL_STYLE = -16 + GWL_WNDPROC = -4 + GWLP_WNDPROC = -4 + GWL_HINSTANCE = -6 + GWLP_HINSTANCE = -6 + GWL_HWNDPARENT = -8 + GWLP_HWNDPARENT = -8 + GWL_ID = -12 + GWLP_ID = -12 + GWL_USERDATA = -21 + GWLP_USERDATA = -21 +) + +// Window style constants +const ( + WS_OVERLAPPED = 0X00000000 + WS_POPUP = 0X80000000 + WS_CHILD = 0X40000000 + WS_MINIMIZE = 0X20000000 + WS_VISIBLE = 0X10000000 + WS_DISABLED = 0X08000000 + WS_CLIPSIBLINGS = 0X04000000 + WS_CLIPCHILDREN = 0X02000000 + WS_MAXIMIZE = 0X01000000 + WS_CAPTION = 0X00C00000 + WS_BORDER = 0X00800000 + WS_DLGFRAME = 0X00400000 + WS_VSCROLL = 0X00200000 + WS_HSCROLL = 0X00100000 + WS_SYSMENU = 0X00080000 + WS_THICKFRAME = 0X00040000 + WS_GROUP = 0X00020000 + WS_TABSTOP = 0X00010000 + WS_MINIMIZEBOX = 0X00020000 + WS_MAXIMIZEBOX = 0X00010000 + WS_TILED = 0X00000000 + WS_ICONIC = 0X20000000 + WS_SIZEBOX = 0X00040000 + WS_OVERLAPPEDWINDOW = 0X00000000 | 0X00C00000 | 0X00080000 | 0X00040000 | 0X00020000 | 0X00010000 + WS_POPUPWINDOW = 0X80000000 | 0X00800000 | 0X00080000 + WS_CHILDWINDOW = 0X40000000 +) + +// Extended window style constants +const ( + WS_EX_DLGMODALFRAME = 0X00000001 + WS_EX_NOPARENTNOTIFY = 0X00000004 + WS_EX_TOPMOST = 0X00000008 + WS_EX_ACCEPTFILES = 0X00000010 + WS_EX_TRANSPARENT = 0X00000020 + WS_EX_MDICHILD = 0X00000040 + WS_EX_TOOLWINDOW = 0X00000080 + WS_EX_WINDOWEDGE = 0X00000100 + WS_EX_CLIENTEDGE = 0X00000200 + WS_EX_CONTEXTHELP = 0X00000400 + WS_EX_RIGHT = 0X00001000 + WS_EX_LEFT = 0X00000000 + WS_EX_RTLREADING = 0X00002000 + WS_EX_LTRREADING = 0X00000000 + WS_EX_LEFTSCROLLBAR = 0X00004000 + WS_EX_RIGHTSCROLLBAR = 0X00000000 + WS_EX_CONTROLPARENT = 0X00010000 + WS_EX_STATICEDGE = 0X00020000 + WS_EX_APPWINDOW = 0X00040000 + WS_EX_OVERLAPPEDWINDOW = 0X00000100 | 0X00000200 + WS_EX_PALETTEWINDOW = 0X00000100 | 0X00000080 | 0X00000008 + WS_EX_LAYERED = 0X00080000 + WS_EX_NOINHERITLAYOUT = 0X00100000 + WS_EX_LAYOUTRTL = 0X00400000 + WS_EX_NOACTIVATE = 0X08000000 +) + +// Window message constants +const ( + WM_APP = 32768 + WM_ACTIVATE = 6 + WM_ACTIVATEAPP = 28 + WM_AFXFIRST = 864 + WM_AFXLAST = 895 + WM_ASKCBFORMATNAME = 780 + WM_CANCELJOURNAL = 75 + WM_CANCELMODE = 31 + WM_CAPTURECHANGED = 533 + WM_CHANGECBCHAIN = 781 + WM_CHAR = 258 + WM_CHARTOITEM = 47 + WM_CHILDACTIVATE = 34 + WM_CLEAR = 771 + WM_CLOSE = 16 + WM_COMMAND = 273 + WM_COMMNOTIFY = 68 /* OBSOLETE */ + WM_COMPACTING = 65 + WM_COMPAREITEM = 57 + WM_CONTEXTMENU = 123 + WM_COPY 
= 769 + WM_COPYDATA = 74 + WM_CREATE = 1 + WM_CTLCOLORBTN = 309 + WM_CTLCOLORDLG = 310 + WM_CTLCOLOREDIT = 307 + WM_CTLCOLORLISTBOX = 308 + WM_CTLCOLORMSGBOX = 306 + WM_CTLCOLORSCROLLBAR = 311 + WM_CTLCOLORSTATIC = 312 + WM_CUT = 768 + WM_DEADCHAR = 259 + WM_DELETEITEM = 45 + WM_DESTROY = 2 + WM_DESTROYCLIPBOARD = 775 + WM_DEVICECHANGE = 537 + WM_DEVMODECHANGE = 27 + WM_DISPLAYCHANGE = 126 + WM_DRAWCLIPBOARD = 776 + WM_DRAWITEM = 43 + WM_DROPFILES = 563 + WM_ENABLE = 10 + WM_ENDSESSION = 22 + WM_ENTERIDLE = 289 + WM_ENTERMENULOOP = 529 + WM_ENTERSIZEMOVE = 561 + WM_ERASEBKGND = 20 + WM_EXITMENULOOP = 530 + WM_EXITSIZEMOVE = 562 + WM_FONTCHANGE = 29 + WM_GETDLGCODE = 135 + WM_GETFONT = 49 + WM_GETHOTKEY = 51 + WM_GETICON = 127 + WM_GETMINMAXINFO = 36 + WM_GETTEXT = 13 + WM_GETTEXTLENGTH = 14 + WM_HANDHELDFIRST = 856 + WM_HANDHELDLAST = 863 + WM_HELP = 83 + WM_HOTKEY = 786 + WM_HSCROLL = 276 + WM_HSCROLLCLIPBOARD = 782 + WM_ICONERASEBKGND = 39 + WM_INITDIALOG = 272 + WM_INITMENU = 278 + WM_INITMENUPOPUP = 279 + WM_INPUT = 0X00FF + WM_INPUTLANGCHANGE = 81 + WM_INPUTLANGCHANGEREQUEST = 80 + WM_KEYDOWN = 256 + WM_KEYUP = 257 + WM_KILLFOCUS = 8 + WM_MDIACTIVATE = 546 + WM_MDICASCADE = 551 + WM_MDICREATE = 544 + WM_MDIDESTROY = 545 + WM_MDIGETACTIVE = 553 + WM_MDIICONARRANGE = 552 + WM_MDIMAXIMIZE = 549 + WM_MDINEXT = 548 + WM_MDIREFRESHMENU = 564 + WM_MDIRESTORE = 547 + WM_MDISETMENU = 560 + WM_MDITILE = 550 + WM_MEASUREITEM = 44 + WM_GETOBJECT = 0X003D + WM_CHANGEUISTATE = 0X0127 + WM_UPDATEUISTATE = 0X0128 + WM_QUERYUISTATE = 0X0129 + WM_UNINITMENUPOPUP = 0X0125 + WM_MENURBUTTONUP = 290 + WM_MENUCOMMAND = 0X0126 + WM_MENUGETOBJECT = 0X0124 + WM_MENUDRAG = 0X0123 + WM_APPCOMMAND = 0X0319 + WM_MENUCHAR = 288 + WM_MENUSELECT = 287 + WM_MOVE = 3 + WM_MOVING = 534 + WM_NCACTIVATE = 134 + WM_NCCALCSIZE = 131 + WM_NCCREATE = 129 + WM_NCDESTROY = 130 + WM_NCHITTEST = 132 + WM_NCLBUTTONDBLCLK = 163 + WM_NCLBUTTONDOWN = 161 + WM_NCLBUTTONUP = 162 + WM_NCMBUTTONDBLCLK = 169 + WM_NCMBUTTONDOWN = 167 + WM_NCMBUTTONUP = 168 + WM_NCXBUTTONDOWN = 171 + WM_NCXBUTTONUP = 172 + WM_NCXBUTTONDBLCLK = 173 + WM_NCMOUSEHOVER = 0X02A0 + WM_NCMOUSELEAVE = 0X02A2 + WM_NCMOUSEMOVE = 160 + WM_NCPAINT = 133 + WM_NCRBUTTONDBLCLK = 166 + WM_NCRBUTTONDOWN = 164 + WM_NCRBUTTONUP = 165 + WM_NEXTDLGCTL = 40 + WM_NEXTMENU = 531 + WM_NOTIFY = 78 + WM_NOTIFYFORMAT = 85 + WM_NULL = 0 + WM_PAINT = 15 + WM_PAINTCLIPBOARD = 777 + WM_PAINTICON = 38 + WM_PALETTECHANGED = 785 + WM_PALETTEISCHANGING = 784 + WM_PARENTNOTIFY = 528 + WM_PASTE = 770 + WM_PENWINFIRST = 896 + WM_PENWINLAST = 911 + WM_POWER = 72 + WM_POWERBROADCAST = 536 + WM_PRINT = 791 + WM_PRINTCLIENT = 792 + WM_QUERYDRAGICON = 55 + WM_QUERYENDSESSION = 17 + WM_QUERYNEWPALETTE = 783 + WM_QUERYOPEN = 19 + WM_QUEUESYNC = 35 + WM_QUIT = 18 + WM_RENDERALLFORMATS = 774 + WM_RENDERFORMAT = 773 + WM_SETCURSOR = 32 + WM_SETFOCUS = 7 + WM_SETFONT = 48 + WM_SETHOTKEY = 50 + WM_SETICON = 128 + WM_SETREDRAW = 11 + WM_SETTEXT = 12 + WM_SETTINGCHANGE = 26 + WM_SHOWWINDOW = 24 + WM_SIZE = 5 + WM_SIZECLIPBOARD = 779 + WM_SIZING = 532 + WM_SPOOLERSTATUS = 42 + WM_STYLECHANGED = 125 + WM_STYLECHANGING = 124 + WM_SYSCHAR = 262 + WM_SYSCOLORCHANGE = 21 + WM_SYSCOMMAND = 274 + WM_SYSDEADCHAR = 263 + WM_SYSKEYDOWN = 260 + WM_SYSKEYUP = 261 + WM_TCARD = 82 + WM_THEMECHANGED = 794 + WM_TIMECHANGE = 30 + WM_TIMER = 275 + WM_UNDO = 772 + WM_USER = 1024 + WM_USERCHANGED = 84 + WM_VKEYTOITEM = 46 + WM_VSCROLL = 277 + WM_VSCROLLCLIPBOARD = 778 + WM_WINDOWPOSCHANGED = 71 + WM_WINDOWPOSCHANGING = 70 + 
WM_WININICHANGE = 26 + WM_KEYFIRST = 256 + WM_KEYLAST = 264 + WM_SYNCPAINT = 136 + WM_MOUSEACTIVATE = 33 + WM_MOUSEMOVE = 512 + WM_LBUTTONDOWN = 513 + WM_LBUTTONUP = 514 + WM_LBUTTONDBLCLK = 515 + WM_RBUTTONDOWN = 516 + WM_RBUTTONUP = 517 + WM_RBUTTONDBLCLK = 518 + WM_MBUTTONDOWN = 519 + WM_MBUTTONUP = 520 + WM_MBUTTONDBLCLK = 521 + WM_MOUSEWHEEL = 522 + WM_MOUSEFIRST = 512 + WM_XBUTTONDOWN = 523 + WM_XBUTTONUP = 524 + WM_XBUTTONDBLCLK = 525 + WM_MOUSELAST = 525 + WM_MOUSEHOVER = 0X2A1 + WM_MOUSELEAVE = 0X2A3 + WM_CLIPBOARDUPDATE = 0x031D +) + +// WM_ACTIVATE +const ( + WA_INACTIVE = 0 + WA_ACTIVE = 1 + WA_CLICKACTIVE = 2 +) + +const LF_FACESIZE = 32 + +// Font weight constants +const ( + FW_DONTCARE = 0 + FW_THIN = 100 + FW_EXTRALIGHT = 200 + FW_ULTRALIGHT = FW_EXTRALIGHT + FW_LIGHT = 300 + FW_NORMAL = 400 + FW_REGULAR = 400 + FW_MEDIUM = 500 + FW_SEMIBOLD = 600 + FW_DEMIBOLD = FW_SEMIBOLD + FW_BOLD = 700 + FW_EXTRABOLD = 800 + FW_ULTRABOLD = FW_EXTRABOLD + FW_HEAVY = 900 + FW_BLACK = FW_HEAVY +) + +// Charset constants +const ( + ANSI_CHARSET = 0 + DEFAULT_CHARSET = 1 + SYMBOL_CHARSET = 2 + SHIFTJIS_CHARSET = 128 + HANGEUL_CHARSET = 129 + HANGUL_CHARSET = 129 + GB2312_CHARSET = 134 + CHINESEBIG5_CHARSET = 136 + GREEK_CHARSET = 161 + TURKISH_CHARSET = 162 + HEBREW_CHARSET = 177 + ARABIC_CHARSET = 178 + BALTIC_CHARSET = 186 + RUSSIAN_CHARSET = 204 + THAI_CHARSET = 222 + EASTEUROPE_CHARSET = 238 + OEM_CHARSET = 255 + JOHAB_CHARSET = 130 + VIETNAMESE_CHARSET = 163 + MAC_CHARSET = 77 +) + +// Font output precision constants +const ( + OUT_DEFAULT_PRECIS = 0 + OUT_STRING_PRECIS = 1 + OUT_CHARACTER_PRECIS = 2 + OUT_STROKE_PRECIS = 3 + OUT_TT_PRECIS = 4 + OUT_DEVICE_PRECIS = 5 + OUT_RASTER_PRECIS = 6 + OUT_TT_ONLY_PRECIS = 7 + OUT_OUTLINE_PRECIS = 8 + OUT_PS_ONLY_PRECIS = 10 +) + +// Font clipping precision constants +const ( + CLIP_DEFAULT_PRECIS = 0 + CLIP_CHARACTER_PRECIS = 1 + CLIP_STROKE_PRECIS = 2 + CLIP_MASK = 15 + CLIP_LH_ANGLES = 16 + CLIP_TT_ALWAYS = 32 + CLIP_EMBEDDED = 128 +) + +// Font output quality constants +const ( + DEFAULT_QUALITY = 0 + DRAFT_QUALITY = 1 + PROOF_QUALITY = 2 + NONANTIALIASED_QUALITY = 3 + ANTIALIASED_QUALITY = 4 + CLEARTYPE_QUALITY = 5 +) + +// Font pitch constants +const ( + DEFAULT_PITCH = 0 + FIXED_PITCH = 1 + VARIABLE_PITCH = 2 +) + +// Font family constants +const ( + FF_DECORATIVE = 80 + FF_DONTCARE = 0 + FF_MODERN = 48 + FF_ROMAN = 16 + FF_SCRIPT = 64 + FF_SWISS = 32 +) + +// DeviceCapabilities capabilities +const ( + DC_FIELDS = 1 + DC_PAPERS = 2 + DC_PAPERSIZE = 3 + DC_MINEXTENT = 4 + DC_MAXEXTENT = 5 + DC_BINS = 6 + DC_DUPLEX = 7 + DC_SIZE = 8 + DC_EXTRA = 9 + DC_VERSION = 10 + DC_DRIVER = 11 + DC_BINNAMES = 12 + DC_ENUMRESOLUTIONS = 13 + DC_FILEDEPENDENCIES = 14 + DC_TRUETYPE = 15 + DC_PAPERNAMES = 16 + DC_ORIENTATION = 17 + DC_COPIES = 18 + DC_BINADJUST = 19 + DC_EMF_COMPLIANT = 20 + DC_DATATYPE_PRODUCED = 21 + DC_COLLATE = 22 + DC_MANUFACTURER = 23 + DC_MODEL = 24 + DC_PERSONALITY = 25 + DC_PRINTRATE = 26 + DC_PRINTRATEUNIT = 27 + DC_PRINTERMEM = 28 + DC_MEDIAREADY = 29 + DC_STAPLE = 30 + DC_PRINTRATEPPM = 31 + DC_COLORDEVICE = 32 + DC_NUP = 33 + DC_MEDIATYPENAMES = 34 + DC_MEDIATYPES = 35 +) + +// GetDeviceCaps index constants +const ( + DRIVERVERSION = 0 + TECHNOLOGY = 2 + HORZSIZE = 4 + VERTSIZE = 6 + HORZRES = 8 + VERTRES = 10 + LOGPIXELSX = 88 + LOGPIXELSY = 90 + BITSPIXEL = 12 + PLANES = 14 + NUMBRUSHES = 16 + NUMPENS = 18 + NUMFONTS = 22 + NUMCOLORS = 24 + NUMMARKERS = 20 + ASPECTX = 40 + ASPECTY = 42 + ASPECTXY = 44 + PDEVICESIZE = 26 + 
CLIPCAPS = 36 + SIZEPALETTE = 104 + NUMRESERVED = 106 + COLORRES = 108 + PHYSICALWIDTH = 110 + PHYSICALHEIGHT = 111 + PHYSICALOFFSETX = 112 + PHYSICALOFFSETY = 113 + SCALINGFACTORX = 114 + SCALINGFACTORY = 115 + VREFRESH = 116 + DESKTOPHORZRES = 118 + DESKTOPVERTRES = 117 + BLTALIGNMENT = 119 + SHADEBLENDCAPS = 120 + COLORMGMTCAPS = 121 + RASTERCAPS = 38 + CURVECAPS = 28 + LINECAPS = 30 + POLYGONALCAPS = 32 + TEXTCAPS = 34 +) + +// GetDeviceCaps TECHNOLOGY constants +const ( + DT_PLOTTER = 0 + DT_RASDISPLAY = 1 + DT_RASPRINTER = 2 + DT_RASCAMERA = 3 + DT_CHARSTREAM = 4 + DT_METAFILE = 5 + DT_DISPFILE = 6 +) + +// GetDeviceCaps SHADEBLENDCAPS constants +const ( + SB_NONE = 0x00 + SB_CONST_ALPHA = 0x01 + SB_PIXEL_ALPHA = 0x02 + SB_PREMULT_ALPHA = 0x04 + SB_GRAD_RECT = 0x10 + SB_GRAD_TRI = 0x20 +) + +// GetDeviceCaps COLORMGMTCAPS constants +const ( + CM_NONE = 0x00 + CM_DEVICE_ICM = 0x01 + CM_GAMMA_RAMP = 0x02 + CM_CMYK_COLOR = 0x04 +) + +// GetDeviceCaps RASTERCAPS constants +const ( + RC_BANDING = 2 + RC_BITBLT = 1 + RC_BITMAP64 = 8 + RC_DI_BITMAP = 128 + RC_DIBTODEV = 512 + RC_FLOODFILL = 4096 + RC_GDI20_OUTPUT = 16 + RC_PALETTE = 256 + RC_SCALING = 4 + RC_STRETCHBLT = 2048 + RC_STRETCHDIB = 8192 + RC_DEVBITS = 0x8000 + RC_OP_DX_OUTPUT = 0x4000 +) + +// GetDeviceCaps CURVECAPS constants +const ( + CC_NONE = 0 + CC_CIRCLES = 1 + CC_PIE = 2 + CC_CHORD = 4 + CC_ELLIPSES = 8 + CC_WIDE = 16 + CC_STYLED = 32 + CC_WIDESTYLED = 64 + CC_INTERIORS = 128 + CC_ROUNDRECT = 256 +) + +// GetDeviceCaps LINECAPS constants +const ( + LC_NONE = 0 + LC_POLYLINE = 2 + LC_MARKER = 4 + LC_POLYMARKER = 8 + LC_WIDE = 16 + LC_STYLED = 32 + LC_WIDESTYLED = 64 + LC_INTERIORS = 128 +) + +// GetDeviceCaps POLYGONALCAPS constants +const ( + PC_NONE = 0 + PC_POLYGON = 1 + PC_POLYPOLYGON = 256 + PC_PATHS = 512 + PC_RECTANGLE = 2 + PC_WINDPOLYGON = 4 + PC_SCANLINE = 8 + PC_TRAPEZOID = 4 + PC_WIDE = 16 + PC_STYLED = 32 + PC_WIDESTYLED = 64 + PC_INTERIORS = 128 +) + +// GetDeviceCaps TEXTCAPS constants +const ( + TC_OP_CHARACTER = 1 + TC_OP_STROKE = 2 + TC_CP_STROKE = 4 + TC_CR_90 = 8 + TC_CR_ANY = 16 + TC_SF_X_YINDEP = 32 + TC_SA_DOUBLE = 64 + TC_SA_INTEGER = 128 + TC_SA_CONTIN = 256 + TC_EA_DOUBLE = 512 + TC_IA_ABLE = 1024 + TC_UA_ABLE = 2048 + TC_SO_ABLE = 4096 + TC_RA_ABLE = 8192 + TC_VA_ABLE = 16384 + TC_RESERVED = 32768 + TC_SCROLLBLT = 65536 +) + +// Static control styles +const ( + SS_BITMAP = 14 + SS_BLACKFRAME = 7 + SS_BLACKRECT = 4 + SS_CENTER = 1 + SS_CENTERIMAGE = 512 + SS_EDITCONTROL = 0x2000 + SS_ENHMETAFILE = 15 + SS_ETCHEDFRAME = 18 + SS_ETCHEDHORZ = 16 + SS_ETCHEDVERT = 17 + SS_GRAYFRAME = 8 + SS_GRAYRECT = 5 + SS_ICON = 3 + SS_LEFT = 0 + SS_LEFTNOWORDWRAP = 0xc + SS_NOPREFIX = 128 + SS_NOTIFY = 256 + SS_OWNERDRAW = 0xd + SS_REALSIZECONTROL = 0x040 + SS_REALSIZEIMAGE = 0x800 + SS_RIGHT = 2 + SS_RIGHTJUST = 0x400 + SS_SIMPLE = 11 + SS_SUNKEN = 4096 + SS_WHITEFRAME = 9 + SS_WHITERECT = 6 + SS_USERITEM = 10 + SS_TYPEMASK = 0x0000001F + SS_ENDELLIPSIS = 0x00004000 + SS_PATHELLIPSIS = 0x00008000 + SS_WORDELLIPSIS = 0x0000C000 + SS_ELLIPSISMASK = 0x0000C000 +) + +// Edit styles +const ( + ES_LEFT = 0x0000 + ES_CENTER = 0x0001 + ES_RIGHT = 0x0002 + ES_MULTILINE = 0x0004 + ES_UPPERCASE = 0x0008 + ES_LOWERCASE = 0x0010 + ES_PASSWORD = 0x0020 + ES_AUTOVSCROLL = 0x0040 + ES_AUTOHSCROLL = 0x0080 + ES_NOHIDESEL = 0x0100 + ES_OEMCONVERT = 0x0400 + ES_READONLY = 0x0800 + ES_WANTRETURN = 0x1000 + ES_NUMBER = 0x2000 +) + +// Edit notifications +const ( + EN_SETFOCUS = 0x0100 + EN_KILLFOCUS = 0x0200 + EN_CHANGE = 0x0300 + 
EN_UPDATE = 0x0400 + EN_ERRSPACE = 0x0500 + EN_MAXTEXT = 0x0501 + EN_HSCROLL = 0x0601 + EN_VSCROLL = 0x0602 + EN_ALIGN_LTR_EC = 0x0700 + EN_ALIGN_RTL_EC = 0x0701 +) + +// Edit messages +const ( + EM_GETSEL = 0x00B0 + EM_SETSEL = 0x00B1 + EM_GETRECT = 0x00B2 + EM_SETRECT = 0x00B3 + EM_SETRECTNP = 0x00B4 + EM_SCROLL = 0x00B5 + EM_LINESCROLL = 0x00B6 + EM_SCROLLCARET = 0x00B7 + EM_GETMODIFY = 0x00B8 + EM_SETMODIFY = 0x00B9 + EM_GETLINECOUNT = 0x00BA + EM_LINEINDEX = 0x00BB + EM_SETHANDLE = 0x00BC + EM_GETHANDLE = 0x00BD + EM_GETTHUMB = 0x00BE + EM_LINELENGTH = 0x00C1 + EM_REPLACESEL = 0x00C2 + EM_GETLINE = 0x00C4 + EM_LIMITTEXT = 0x00C5 + EM_CANUNDO = 0x00C6 + EM_UNDO = 0x00C7 + EM_FMTLINES = 0x00C8 + EM_LINEFROMCHAR = 0x00C9 + EM_SETTABSTOPS = 0x00CB + EM_SETPASSWORDCHAR = 0x00CC + EM_EMPTYUNDOBUFFER = 0x00CD + EM_GETFIRSTVISIBLELINE = 0x00CE + EM_SETREADONLY = 0x00CF + EM_SETWORDBREAKPROC = 0x00D0 + EM_GETWORDBREAKPROC = 0x00D1 + EM_GETPASSWORDCHAR = 0x00D2 + EM_SETMARGINS = 0x00D3 + EM_GETMARGINS = 0x00D4 + EM_SETLIMITTEXT = EM_LIMITTEXT + EM_GETLIMITTEXT = 0x00D5 + EM_POSFROMCHAR = 0x00D6 + EM_CHARFROMPOS = 0x00D7 + EM_SETIMESTATUS = 0x00D8 + EM_GETIMESTATUS = 0x00D9 + EM_SETCUEBANNER = 0x1501 + EM_GETCUEBANNER = 0x1502 +) + +const ( + CCM_FIRST = 0x2000 + CCM_LAST = CCM_FIRST + 0x200 + CCM_SETBKCOLOR = 8193 + CCM_SETCOLORSCHEME = 8194 + CCM_GETCOLORSCHEME = 8195 + CCM_GETDROPTARGET = 8196 + CCM_SETUNICODEFORMAT = 8197 + CCM_GETUNICODEFORMAT = 8198 + CCM_SETVERSION = 0x2007 + CCM_GETVERSION = 0x2008 + CCM_SETNOTIFYWINDOW = 0x2009 + CCM_SETWINDOWTHEME = 0x200b + CCM_DPISCALE = 0x200c +) + +// Common controls styles +const ( + CCS_TOP = 1 + CCS_NOMOVEY = 2 + CCS_BOTTOM = 3 + CCS_NORESIZE = 4 + CCS_NOPARENTALIGN = 8 + CCS_ADJUSTABLE = 32 + CCS_NODIVIDER = 64 + CCS_VERT = 128 + CCS_LEFT = 129 + CCS_NOMOVEX = 130 + CCS_RIGHT = 131 +) + +// ProgressBar messages +const ( + PROGRESS_CLASS = "msctls_progress32" + PBM_SETPOS = WM_USER + 2 + PBM_DELTAPOS = WM_USER + 3 + PBM_SETSTEP = WM_USER + 4 + PBM_STEPIT = WM_USER + 5 + PBM_SETRANGE32 = 1030 + PBM_GETRANGE = 1031 + PBM_GETPOS = 1032 + PBM_SETBARCOLOR = 1033 + PBM_SETBKCOLOR = CCM_SETBKCOLOR + PBS_SMOOTH = 1 + PBS_VERTICAL = 4 +) + +// GetOpenFileName and GetSaveFileName extended flags +const ( + OFN_EX_NOPLACESBAR = 0x00000001 +) + +// GetOpenFileName and GetSaveFileName flags +const ( + OFN_ALLOWMULTISELECT = 0x00000200 + OFN_CREATEPROMPT = 0x00002000 + OFN_DONTADDTORECENT = 0x02000000 + OFN_ENABLEHOOK = 0x00000020 + OFN_ENABLEINCLUDENOTIFY = 0x00400000 + OFN_ENABLESIZING = 0x00800000 + OFN_ENABLETEMPLATE = 0x00000040 + OFN_ENABLETEMPLATEHANDLE = 0x00000080 + OFN_EXPLORER = 0x00080000 + OFN_EXTENSIONDIFFERENT = 0x00000400 + OFN_FILEMUSTEXIST = 0x00001000 + OFN_FORCESHOWHIDDEN = 0x10000000 + OFN_HIDEREADONLY = 0x00000004 + OFN_LONGNAMES = 0x00200000 + OFN_NOCHANGEDIR = 0x00000008 + OFN_NODEREFERENCELINKS = 0x00100000 + OFN_NOLONGNAMES = 0x00040000 + OFN_NONETWORKBUTTON = 0x00020000 + OFN_NOREADONLYRETURN = 0x00008000 + OFN_NOTESTFILECREATE = 0x00010000 + OFN_NOVALIDATE = 0x00000100 + OFN_OVERWRITEPROMPT = 0x00000002 + OFN_PATHMUSTEXIST = 0x00000800 + OFN_READONLY = 0x00000001 + OFN_SHAREAWARE = 0x00004000 + OFN_SHOWHELP = 0x00000010 +) + +//SHBrowseForFolder flags +const ( + BIF_RETURNONLYFSDIRS = 0x00000001 + BIF_DONTGOBELOWDOMAIN = 0x00000002 + BIF_STATUSTEXT = 0x00000004 + BIF_RETURNFSANCESTORS = 0x00000008 + BIF_EDITBOX = 0x00000010 + BIF_VALIDATE = 0x00000020 + BIF_NEWDIALOGSTYLE = 0x00000040 + BIF_BROWSEINCLUDEURLS = 0x00000080 + 
BIF_USENEWUI = BIF_EDITBOX | BIF_NEWDIALOGSTYLE + BIF_UAHINT = 0x00000100 + BIF_NONEWFOLDERBUTTON = 0x00000200 + BIF_NOTRANSLATETARGETS = 0x00000400 + BIF_BROWSEFORCOMPUTER = 0x00001000 + BIF_BROWSEFORPRINTER = 0x00002000 + BIF_BROWSEINCLUDEFILES = 0x00004000 + BIF_SHAREABLE = 0x00008000 + BIF_BROWSEFILEJUNCTIONS = 0x00010000 +) + +//MessageBox flags +const ( + MB_OK = 0x00000000 + MB_OKCANCEL = 0x00000001 + MB_ABORTRETRYIGNORE = 0x00000002 + MB_YESNOCANCEL = 0x00000003 + MB_YESNO = 0x00000004 + MB_RETRYCANCEL = 0x00000005 + MB_CANCELTRYCONTINUE = 0x00000006 + MB_ICONHAND = 0x00000010 + MB_ICONQUESTION = 0x00000020 + MB_ICONEXCLAMATION = 0x00000030 + MB_ICONASTERISK = 0x00000040 + MB_USERICON = 0x00000080 + MB_ICONWARNING = MB_ICONEXCLAMATION + MB_ICONERROR = MB_ICONHAND + MB_ICONINFORMATION = MB_ICONASTERISK + MB_ICONSTOP = MB_ICONHAND + MB_DEFBUTTON1 = 0x00000000 + MB_DEFBUTTON2 = 0x00000100 + MB_DEFBUTTON3 = 0x00000200 + MB_DEFBUTTON4 = 0x00000300 +) + +//COM +const ( + E_INVALIDARG = 0x80070057 + E_OUTOFMEMORY = 0x8007000E + E_UNEXPECTED = 0x8000FFFF +) + +const ( + S_OK = 0 + S_FALSE = 0x0001 + RPC_E_CHANGED_MODE = 0x80010106 +) + +// GetSystemMetrics constants +const ( + SM_CXSCREEN = 0 + SM_CYSCREEN = 1 + SM_CXVSCROLL = 2 + SM_CYHSCROLL = 3 + SM_CYCAPTION = 4 + SM_CXBORDER = 5 + SM_CYBORDER = 6 + SM_CXDLGFRAME = 7 + SM_CYDLGFRAME = 8 + SM_CYVTHUMB = 9 + SM_CXHTHUMB = 10 + SM_CXICON = 11 + SM_CYICON = 12 + SM_CXCURSOR = 13 + SM_CYCURSOR = 14 + SM_CYMENU = 15 + SM_CXFULLSCREEN = 16 + SM_CYFULLSCREEN = 17 + SM_CYKANJIWINDOW = 18 + SM_MOUSEPRESENT = 19 + SM_CYVSCROLL = 20 + SM_CXHSCROLL = 21 + SM_DEBUG = 22 + SM_SWAPBUTTON = 23 + SM_RESERVED1 = 24 + SM_RESERVED2 = 25 + SM_RESERVED3 = 26 + SM_RESERVED4 = 27 + SM_CXMIN = 28 + SM_CYMIN = 29 + SM_CXSIZE = 30 + SM_CYSIZE = 31 + SM_CXFRAME = 32 + SM_CYFRAME = 33 + SM_CXMINTRACK = 34 + SM_CYMINTRACK = 35 + SM_CXDOUBLECLK = 36 + SM_CYDOUBLECLK = 37 + SM_CXICONSPACING = 38 + SM_CYICONSPACING = 39 + SM_MENUDROPALIGNMENT = 40 + SM_PENWINDOWS = 41 + SM_DBCSENABLED = 42 + SM_CMOUSEBUTTONS = 43 + SM_CXFIXEDFRAME = SM_CXDLGFRAME + SM_CYFIXEDFRAME = SM_CYDLGFRAME + SM_CXSIZEFRAME = SM_CXFRAME + SM_CYSIZEFRAME = SM_CYFRAME + SM_SECURE = 44 + SM_CXEDGE = 45 + SM_CYEDGE = 46 + SM_CXMINSPACING = 47 + SM_CYMINSPACING = 48 + SM_CXSMICON = 49 + SM_CYSMICON = 50 + SM_CYSMCAPTION = 51 + SM_CXSMSIZE = 52 + SM_CYSMSIZE = 53 + SM_CXMENUSIZE = 54 + SM_CYMENUSIZE = 55 + SM_ARRANGE = 56 + SM_CXMINIMIZED = 57 + SM_CYMINIMIZED = 58 + SM_CXMAXTRACK = 59 + SM_CYMAXTRACK = 60 + SM_CXMAXIMIZED = 61 + SM_CYMAXIMIZED = 62 + SM_NETWORK = 63 + SM_CLEANBOOT = 67 + SM_CXDRAG = 68 + SM_CYDRAG = 69 + SM_SHOWSOUNDS = 70 + SM_CXMENUCHECK = 71 + SM_CYMENUCHECK = 72 + SM_SLOWMACHINE = 73 + SM_MIDEASTENABLED = 74 + SM_MOUSEWHEELPRESENT = 75 + SM_XVIRTUALSCREEN = 76 + SM_YVIRTUALSCREEN = 77 + SM_CXVIRTUALSCREEN = 78 + SM_CYVIRTUALSCREEN = 79 + SM_CMONITORS = 80 + SM_SAMEDISPLAYFORMAT = 81 + SM_IMMENABLED = 82 + SM_CXFOCUSBORDER = 83 + SM_CYFOCUSBORDER = 84 + SM_TABLETPC = 86 + SM_MEDIACENTER = 87 + SM_STARTER = 88 + SM_SERVERR2 = 89 + SM_CMETRICS = 91 + SM_REMOTESESSION = 0x1000 + SM_SHUTTINGDOWN = 0x2000 + SM_REMOTECONTROL = 0x2001 + SM_CARETBLINKINGENABLED = 0x2002 +) + +const ( + CLSCTX_INPROC_SERVER = 1 + CLSCTX_INPROC_HANDLER = 2 + CLSCTX_LOCAL_SERVER = 4 + CLSCTX_INPROC_SERVER16 = 8 + CLSCTX_REMOTE_SERVER = 16 + CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER + CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER + CLSCTX_SERVER = 
CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER +) + +const ( + COINIT_APARTMENTTHREADED = 0x2 + COINIT_MULTITHREADED = 0x0 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +const ( + DISPATCH_METHOD = 1 + DISPATCH_PROPERTYGET = 2 + DISPATCH_PROPERTYPUT = 4 + DISPATCH_PROPERTYPUTREF = 8 +) + +const ( + CC_FASTCALL = iota + CC_CDECL + CC_MSCPASCAL + CC_PASCAL = CC_MSCPASCAL + CC_MACPASCAL + CC_STDCALL + CC_FPFASTCALL + CC_SYSCALL + CC_MPWCDECL + CC_MPWPASCAL + CC_MAX = CC_MPWPASCAL +) + +const ( + VT_EMPTY = 0x0 + VT_NULL = 0x1 + VT_I2 = 0x2 + VT_I4 = 0x3 + VT_R4 = 0x4 + VT_R8 = 0x5 + VT_CY = 0x6 + VT_DATE = 0x7 + VT_BSTR = 0x8 + VT_DISPATCH = 0x9 + VT_ERROR = 0xa + VT_BOOL = 0xb + VT_VARIANT = 0xc + VT_UNKNOWN = 0xd + VT_DECIMAL = 0xe + VT_I1 = 0x10 + VT_UI1 = 0x11 + VT_UI2 = 0x12 + VT_UI4 = 0x13 + VT_I8 = 0x14 + VT_UI8 = 0x15 + VT_INT = 0x16 + VT_UINT = 0x17 + VT_VOID = 0x18 + VT_HRESULT = 0x19 + VT_PTR = 0x1a + VT_SAFEARRAY = 0x1b + VT_CARRAY = 0x1c + VT_USERDEFINED = 0x1d + VT_LPSTR = 0x1e + VT_LPWSTR = 0x1f + VT_RECORD = 0x24 + VT_INT_PTR = 0x25 + VT_UINT_PTR = 0x26 + VT_FILETIME = 0x40 + VT_BLOB = 0x41 + VT_STREAM = 0x42 + VT_STORAGE = 0x43 + VT_STREAMED_OBJECT = 0x44 + VT_STORED_OBJECT = 0x45 + VT_BLOB_OBJECT = 0x46 + VT_CF = 0x47 + VT_CLSID = 0x48 + VT_BSTR_BLOB = 0xfff + VT_VECTOR = 0x1000 + VT_ARRAY = 0x2000 + VT_BYREF = 0x4000 + VT_RESERVED = 0x8000 + VT_ILLEGAL = 0xffff + VT_ILLEGALMASKED = 0xfff + VT_TYPEMASK = 0xfff +) + +const ( + DISPID_UNKNOWN = -1 + DISPID_VALUE = 0 + DISPID_PROPERTYPUT = -3 + DISPID_NEWENUM = -4 + DISPID_EVALUATE = -5 + DISPID_CONSTRUCTOR = -6 + DISPID_DESTRUCTOR = -7 + DISPID_COLLECT = -8 +) + +const ( + MONITOR_DEFAULTTONULL = 0x00000000 + MONITOR_DEFAULTTOPRIMARY = 0x00000001 + MONITOR_DEFAULTTONEAREST = 0x00000002 + + MONITORINFOF_PRIMARY = 0x00000001 +) + +const ( + CCHDEVICENAME = 32 + CCHFORMNAME = 32 +) + +const ( + IDOK = 1 + IDCANCEL = 2 + IDABORT = 3 + IDRETRY = 4 + IDIGNORE = 5 + IDYES = 6 + IDNO = 7 + IDCLOSE = 8 + IDHELP = 9 + IDTRYAGAIN = 10 + IDCONTINUE = 11 + IDTIMEOUT = 32000 +) + +// Generic WM_NOTIFY notification codes +const ( + NM_FIRST = 0 + NM_OUTOFMEMORY = NM_FIRST - 1 + NM_CLICK = NM_FIRST - 2 + NM_DBLCLK = NM_FIRST - 3 + NM_RETURN = NM_FIRST - 4 + NM_RCLICK = NM_FIRST - 5 + NM_RDBLCLK = NM_FIRST - 6 + NM_SETFOCUS = NM_FIRST - 7 + NM_KILLFOCUS = NM_FIRST - 8 + NM_CUSTOMDRAW = NM_FIRST - 12 + NM_HOVER = NM_FIRST - 13 + NM_NCHITTEST = NM_FIRST - 14 + NM_KEYDOWN = NM_FIRST - 15 + NM_RELEASEDCAPTURE = NM_FIRST - 16 + NM_SETCURSOR = NM_FIRST - 17 + NM_CHAR = NM_FIRST - 18 + NM_TOOLTIPSCREATED = NM_FIRST - 19 + NM_LAST = NM_FIRST - 99 +) + +// ListView messages +const ( + LVM_FIRST = 0x1000 + LVM_GETITEMCOUNT = LVM_FIRST + 4 + LVM_SETIMAGELIST = LVM_FIRST + 3 + LVM_GETIMAGELIST = LVM_FIRST + 2 + LVM_GETITEM = LVM_FIRST + 75 + LVM_SETITEM = LVM_FIRST + 76 + LVM_INSERTITEM = LVM_FIRST + 77 + LVM_DELETEITEM = LVM_FIRST + 8 + LVM_DELETEALLITEMS = LVM_FIRST + 9 + LVM_GETCALLBACKMASK = LVM_FIRST + 10 + LVM_SETCALLBACKMASK = LVM_FIRST + 11 + LVM_SETUNICODEFORMAT = CCM_SETUNICODEFORMAT + LVM_GETNEXTITEM = LVM_FIRST + 12 + LVM_FINDITEM = LVM_FIRST + 83 + LVM_GETITEMRECT = LVM_FIRST + 14 + LVM_GETSTRINGWIDTH = LVM_FIRST + 87 + LVM_HITTEST = LVM_FIRST + 18 + LVM_ENSUREVISIBLE = LVM_FIRST + 19 + LVM_SCROLL = LVM_FIRST + 20 + LVM_REDRAWITEMS = LVM_FIRST + 21 + LVM_ARRANGE = LVM_FIRST + 22 + LVM_EDITLABEL = LVM_FIRST + 118 + LVM_GETEDITCONTROL = LVM_FIRST + 24 + LVM_GETCOLUMN = LVM_FIRST + 95 + LVM_SETCOLUMN = 
LVM_FIRST + 96 + LVM_INSERTCOLUMN = LVM_FIRST + 97 + LVM_DELETECOLUMN = LVM_FIRST + 28 + LVM_GETCOLUMNWIDTH = LVM_FIRST + 29 + LVM_SETCOLUMNWIDTH = LVM_FIRST + 30 + LVM_GETHEADER = LVM_FIRST + 31 + LVM_CREATEDRAGIMAGE = LVM_FIRST + 33 + LVM_GETVIEWRECT = LVM_FIRST + 34 + LVM_GETTEXTCOLOR = LVM_FIRST + 35 + LVM_SETTEXTCOLOR = LVM_FIRST + 36 + LVM_GETTEXTBKCOLOR = LVM_FIRST + 37 + LVM_SETTEXTBKCOLOR = LVM_FIRST + 38 + LVM_GETTOPINDEX = LVM_FIRST + 39 + LVM_GETCOUNTPERPAGE = LVM_FIRST + 40 + LVM_GETORIGIN = LVM_FIRST + 41 + LVM_UPDATE = LVM_FIRST + 42 + LVM_SETITEMSTATE = LVM_FIRST + 43 + LVM_GETITEMSTATE = LVM_FIRST + 44 + LVM_GETITEMTEXT = LVM_FIRST + 115 + LVM_SETITEMTEXT = LVM_FIRST + 116 + LVM_SETITEMCOUNT = LVM_FIRST + 47 + LVM_SORTITEMS = LVM_FIRST + 48 + LVM_SETITEMPOSITION32 = LVM_FIRST + 49 + LVM_GETSELECTEDCOUNT = LVM_FIRST + 50 + LVM_GETITEMSPACING = LVM_FIRST + 51 + LVM_GETISEARCHSTRING = LVM_FIRST + 117 + LVM_SETICONSPACING = LVM_FIRST + 53 + LVM_SETEXTENDEDLISTVIEWSTYLE = LVM_FIRST + 54 + LVM_GETEXTENDEDLISTVIEWSTYLE = LVM_FIRST + 55 + LVM_GETSUBITEMRECT = LVM_FIRST + 56 + LVM_SUBITEMHITTEST = LVM_FIRST + 57 + LVM_SETCOLUMNORDERARRAY = LVM_FIRST + 58 + LVM_GETCOLUMNORDERARRAY = LVM_FIRST + 59 + LVM_SETHOTITEM = LVM_FIRST + 60 + LVM_GETHOTITEM = LVM_FIRST + 61 + LVM_SETHOTCURSOR = LVM_FIRST + 62 + LVM_GETHOTCURSOR = LVM_FIRST + 63 + LVM_APPROXIMATEVIEWRECT = LVM_FIRST + 64 + LVM_SETWORKAREAS = LVM_FIRST + 65 + LVM_GETWORKAREAS = LVM_FIRST + 70 + LVM_GETNUMBEROFWORKAREAS = LVM_FIRST + 73 + LVM_GETSELECTIONMARK = LVM_FIRST + 66 + LVM_SETSELECTIONMARK = LVM_FIRST + 67 + LVM_SETHOVERTIME = LVM_FIRST + 71 + LVM_GETHOVERTIME = LVM_FIRST + 72 + LVM_SETTOOLTIPS = LVM_FIRST + 74 + LVM_GETTOOLTIPS = LVM_FIRST + 78 + LVM_SORTITEMSEX = LVM_FIRST + 81 + LVM_SETBKIMAGE = LVM_FIRST + 138 + LVM_GETBKIMAGE = LVM_FIRST + 139 + LVM_SETSELECTEDCOLUMN = LVM_FIRST + 140 + LVM_SETVIEW = LVM_FIRST + 142 + LVM_GETVIEW = LVM_FIRST + 143 + LVM_INSERTGROUP = LVM_FIRST + 145 + LVM_SETGROUPINFO = LVM_FIRST + 147 + LVM_GETGROUPINFO = LVM_FIRST + 149 + LVM_REMOVEGROUP = LVM_FIRST + 150 + LVM_MOVEGROUP = LVM_FIRST + 151 + LVM_GETGROUPCOUNT = LVM_FIRST + 152 + LVM_GETGROUPINFOBYINDEX = LVM_FIRST + 153 + LVM_MOVEITEMTOGROUP = LVM_FIRST + 154 + LVM_GETGROUPRECT = LVM_FIRST + 98 + LVM_SETGROUPMETRICS = LVM_FIRST + 155 + LVM_GETGROUPMETRICS = LVM_FIRST + 156 + LVM_ENABLEGROUPVIEW = LVM_FIRST + 157 + LVM_SORTGROUPS = LVM_FIRST + 158 + LVM_INSERTGROUPSORTED = LVM_FIRST + 159 + LVM_REMOVEALLGROUPS = LVM_FIRST + 160 + LVM_HASGROUP = LVM_FIRST + 161 + LVM_GETGROUPSTATE = LVM_FIRST + 92 + LVM_GETFOCUSEDGROUP = LVM_FIRST + 93 + LVM_SETTILEVIEWINFO = LVM_FIRST + 162 + LVM_GETTILEVIEWINFO = LVM_FIRST + 163 + LVM_SETTILEINFO = LVM_FIRST + 164 + LVM_GETTILEINFO = LVM_FIRST + 165 + LVM_SETINSERTMARK = LVM_FIRST + 166 + LVM_GETINSERTMARK = LVM_FIRST + 167 + LVM_INSERTMARKHITTEST = LVM_FIRST + 168 + LVM_GETINSERTMARKRECT = LVM_FIRST + 169 + LVM_SETINSERTMARKCOLOR = LVM_FIRST + 170 + LVM_GETINSERTMARKCOLOR = LVM_FIRST + 171 + LVM_SETINFOTIP = LVM_FIRST + 173 + LVM_GETSELECTEDCOLUMN = LVM_FIRST + 174 + LVM_ISGROUPVIEWENABLED = LVM_FIRST + 175 + LVM_GETOUTLINECOLOR = LVM_FIRST + 176 + LVM_SETOUTLINECOLOR = LVM_FIRST + 177 + LVM_CANCELEDITLABEL = LVM_FIRST + 179 + LVM_MAPINDEXTOID = LVM_FIRST + 180 + LVM_MAPIDTOINDEX = LVM_FIRST + 181 + LVM_ISITEMVISIBLE = LVM_FIRST + 182 + LVM_GETNEXTITEMINDEX = LVM_FIRST + 211 +) + +// ListView notifications +const ( + LVN_FIRST = -100 + + LVN_ITEMCHANGING = LVN_FIRST - 0 + LVN_ITEMCHANGED = 
LVN_FIRST - 1 + LVN_INSERTITEM = LVN_FIRST - 2 + LVN_DELETEITEM = LVN_FIRST - 3 + LVN_DELETEALLITEMS = LVN_FIRST - 4 + LVN_BEGINLABELEDITA = LVN_FIRST - 5 + LVN_BEGINLABELEDITW = LVN_FIRST - 75 + LVN_ENDLABELEDITA = LVN_FIRST - 6 + LVN_ENDLABELEDITW = LVN_FIRST - 76 + LVN_COLUMNCLICK = LVN_FIRST - 8 + LVN_BEGINDRAG = LVN_FIRST - 9 + LVN_BEGINRDRAG = LVN_FIRST - 11 + LVN_ODCACHEHINT = LVN_FIRST - 13 + LVN_ODFINDITEMA = LVN_FIRST - 52 + LVN_ODFINDITEMW = LVN_FIRST - 79 + LVN_ITEMACTIVATE = LVN_FIRST - 14 + LVN_ODSTATECHANGED = LVN_FIRST - 15 + LVN_HOTTRACK = LVN_FIRST - 21 + LVN_GETDISPINFO = LVN_FIRST - 77 + LVN_SETDISPINFO = LVN_FIRST - 78 + LVN_KEYDOWN = LVN_FIRST - 55 + LVN_MARQUEEBEGIN = LVN_FIRST - 56 + LVN_GETINFOTIP = LVN_FIRST - 58 + LVN_INCREMENTALSEARCH = LVN_FIRST - 63 + LVN_BEGINSCROLL = LVN_FIRST - 80 + LVN_ENDSCROLL = LVN_FIRST - 81 +) + +// ListView LVNI constants +const ( + LVNI_ALL = 0 + LVNI_FOCUSED = 1 + LVNI_SELECTED = 2 + LVNI_CUT = 4 + LVNI_DROPHILITED = 8 + LVNI_ABOVE = 256 + LVNI_BELOW = 512 + LVNI_TOLEFT = 1024 + LVNI_TORIGHT = 2048 +) + +// ListView styles +const ( + LVS_ICON = 0x0000 + LVS_REPORT = 0x0001 + LVS_SMALLICON = 0x0002 + LVS_LIST = 0x0003 + LVS_TYPEMASK = 0x0003 + LVS_SINGLESEL = 0x0004 + LVS_SHOWSELALWAYS = 0x0008 + LVS_SORTASCENDING = 0x0010 + LVS_SORTDESCENDING = 0x0020 + LVS_SHAREIMAGELISTS = 0x0040 + LVS_NOLABELWRAP = 0x0080 + LVS_AUTOARRANGE = 0x0100 + LVS_EDITLABELS = 0x0200 + LVS_OWNERDATA = 0x1000 + LVS_NOSCROLL = 0x2000 + LVS_TYPESTYLEMASK = 0xfc00 + LVS_ALIGNTOP = 0x0000 + LVS_ALIGNLEFT = 0x0800 + LVS_ALIGNMASK = 0x0c00 + LVS_OWNERDRAWFIXED = 0x0400 + LVS_NOCOLUMNHEADER = 0x4000 + LVS_NOSORTHEADER = 0x8000 +) + +// ListView extended styles +const ( + LVS_EX_GRIDLINES = 0x00000001 + LVS_EX_SUBITEMIMAGES = 0x00000002 + LVS_EX_CHECKBOXES = 0x00000004 + LVS_EX_TRACKSELECT = 0x00000008 + LVS_EX_HEADERDRAGDROP = 0x00000010 + LVS_EX_FULLROWSELECT = 0x00000020 + LVS_EX_ONECLICKACTIVATE = 0x00000040 + LVS_EX_TWOCLICKACTIVATE = 0x00000080 + LVS_EX_FLATSB = 0x00000100 + LVS_EX_REGIONAL = 0x00000200 + LVS_EX_INFOTIP = 0x00000400 + LVS_EX_UNDERLINEHOT = 0x00000800 + LVS_EX_UNDERLINECOLD = 0x00001000 + LVS_EX_MULTIWORKAREAS = 0x00002000 + LVS_EX_LABELTIP = 0x00004000 + LVS_EX_BORDERSELECT = 0x00008000 + LVS_EX_DOUBLEBUFFER = 0x00010000 + LVS_EX_HIDELABELS = 0x00020000 + LVS_EX_SINGLEROW = 0x00040000 + LVS_EX_SNAPTOGRID = 0x00080000 + LVS_EX_SIMPLESELECT = 0x00100000 +) + +// ListView column flags +const ( + LVCF_FMT = 0x0001 + LVCF_WIDTH = 0x0002 + LVCF_TEXT = 0x0004 + LVCF_SUBITEM = 0x0008 + LVCF_IMAGE = 0x0010 + LVCF_ORDER = 0x0020 +) + +// ListView column format constants +const ( + LVCFMT_LEFT = 0x0000 + LVCFMT_RIGHT = 0x0001 + LVCFMT_CENTER = 0x0002 + LVCFMT_JUSTIFYMASK = 0x0003 + LVCFMT_IMAGE = 0x0800 + LVCFMT_BITMAP_ON_RIGHT = 0x1000 + LVCFMT_COL_HAS_IMAGES = 0x8000 +) + +// ListView item flags +const ( + LVIF_TEXT = 0x00000001 + LVIF_IMAGE = 0x00000002 + LVIF_PARAM = 0x00000004 + LVIF_STATE = 0x00000008 + LVIF_INDENT = 0x00000010 + LVIF_NORECOMPUTE = 0x00000800 + LVIF_GROUPID = 0x00000100 + LVIF_COLUMNS = 0x00000200 +) + +// ListView item states +const ( + LVIS_FOCUSED = 1 + LVIS_SELECTED = 2 + LVIS_CUT = 4 + LVIS_DROPHILITED = 8 + LVIS_OVERLAYMASK = 0xF00 + LVIS_STATEIMAGEMASK = 0xF000 +) + +// ListView hit test constants +const ( + LVHT_NOWHERE = 0x00000001 + LVHT_ONITEMICON = 0x00000002 + LVHT_ONITEMLABEL = 0x00000004 + LVHT_ONITEMSTATEICON = 0x00000008 + LVHT_ONITEM = LVHT_ONITEMICON | LVHT_ONITEMLABEL | LVHT_ONITEMSTATEICON + + LVHT_ABOVE = 
0x00000008 + LVHT_BELOW = 0x00000010 + LVHT_TORIGHT = 0x00000020 + LVHT_TOLEFT = 0x00000040 +) + +// ListView image list types +const ( + LVSIL_NORMAL = 0 + LVSIL_SMALL = 1 + LVSIL_STATE = 2 + LVSIL_GROUPHEADER = 3 +) + +// InitCommonControlsEx flags +const ( + ICC_LISTVIEW_CLASSES = 1 + ICC_TREEVIEW_CLASSES = 2 + ICC_BAR_CLASSES = 4 + ICC_TAB_CLASSES = 8 + ICC_UPDOWN_CLASS = 16 + ICC_PROGRESS_CLASS = 32 + ICC_HOTKEY_CLASS = 64 + ICC_ANIMATE_CLASS = 128 + ICC_WIN95_CLASSES = 255 + ICC_DATE_CLASSES = 256 + ICC_USEREX_CLASSES = 512 + ICC_COOL_CLASSES = 1024 + ICC_INTERNET_CLASSES = 2048 + ICC_PAGESCROLLER_CLASS = 4096 + ICC_NATIVEFNTCTL_CLASS = 8192 + INFOTIPSIZE = 1024 + ICC_STANDARD_CLASSES = 0x00004000 + ICC_LINK_CLASS = 0x00008000 +) + +// Dialog Codes +const ( + DLGC_WANTARROWS = 0x0001 + DLGC_WANTTAB = 0x0002 + DLGC_WANTALLKEYS = 0x0004 + DLGC_WANTMESSAGE = 0x0004 + DLGC_HASSETSEL = 0x0008 + DLGC_DEFPUSHBUTTON = 0x0010 + DLGC_UNDEFPUSHBUTTON = 0x0020 + DLGC_RADIOBUTTON = 0x0040 + DLGC_WANTCHARS = 0x0080 + DLGC_STATIC = 0x0100 + DLGC_BUTTON = 0x2000 +) + +// Get/SetWindowWord/Long offsets for use with WC_DIALOG windows +const ( + DWL_MSGRESULT = 0 + DWL_DLGPROC = 4 + DWL_USER = 8 +) + +// Registry predefined keys +const ( + HKEY_CLASSES_ROOT HKEY = 0x80000000 + HKEY_CURRENT_USER HKEY = 0x80000001 + HKEY_LOCAL_MACHINE HKEY = 0x80000002 + HKEY_USERS HKEY = 0x80000003 + HKEY_PERFORMANCE_DATA HKEY = 0x80000004 + HKEY_CURRENT_CONFIG HKEY = 0x80000005 + HKEY_DYN_DATA HKEY = 0x80000006 +) + +// Registry Key Security and Access Rights +const ( + KEY_ALL_ACCESS = 0xF003F + KEY_CREATE_SUB_KEY = 0x0004 + KEY_ENUMERATE_SUB_KEYS = 0x0008 + KEY_NOTIFY = 0x0010 + KEY_QUERY_VALUE = 0x0001 + KEY_SET_VALUE = 0x0002 + KEY_READ = 0x20019 + KEY_WRITE = 0x20006 +) + +const ( + NFR_ANSI = 1 + NFR_UNICODE = 2 + NF_QUERY = 3 + NF_REQUERY = 4 +) + +// Registry value types +const ( + RRF_RT_REG_NONE = 0x00000001 + RRF_RT_REG_SZ = 0x00000002 + RRF_RT_REG_EXPAND_SZ = 0x00000004 + RRF_RT_REG_BINARY = 0x00000008 + RRF_RT_REG_DWORD = 0x00000010 + RRF_RT_REG_MULTI_SZ = 0x00000020 + RRF_RT_REG_QWORD = 0x00000040 + RRF_RT_DWORD = (RRF_RT_REG_BINARY | RRF_RT_REG_DWORD) + RRF_RT_QWORD = (RRF_RT_REG_BINARY | RRF_RT_REG_QWORD) + RRF_RT_ANY = 0x0000ffff + RRF_NOEXPAND = 0x10000000 + RRF_ZEROONFAILURE = 0x20000000 + REG_PROCESS_APPKEY = 0x00000001 + REG_MUI_STRING_TRUNCATE = 0x00000001 +) + +// PeekMessage wRemoveMsg value +const ( + PM_NOREMOVE = 0x000 + PM_REMOVE = 0x001 + PM_NOYIELD = 0x002 +) + +// ImageList flags +const ( + ILC_MASK = 0x00000001 + ILC_COLOR = 0x00000000 + ILC_COLORDDB = 0x000000FE + ILC_COLOR4 = 0x00000004 + ILC_COLOR8 = 0x00000008 + ILC_COLOR16 = 0x00000010 + ILC_COLOR24 = 0x00000018 + ILC_COLOR32 = 0x00000020 + ILC_PALETTE = 0x00000800 + ILC_MIRROR = 0x00002000 + ILC_PERITEMMIRROR = 0x00008000 + ILC_ORIGINALSIZE = 0x00010000 + ILC_HIGHQUALITYSCALE = 0x00020000 +) + +// Keystroke Message Flags +const ( + KF_EXTENDED = 0x0100 + KF_DLGMODE = 0x0800 + KF_MENUMODE = 0x1000 + KF_ALTDOWN = 0x2000 + KF_REPEAT = 0x4000 + KF_UP = 0x8000 +) + +// Virtual-Key Codes +const ( + VK_LBUTTON = 0x01 + VK_RBUTTON = 0x02 + VK_CANCEL = 0x03 + VK_MBUTTON = 0x04 + VK_XBUTTON1 = 0x05 + VK_XBUTTON2 = 0x06 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_CLEAR = 0x0C + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_PAUSE = 0x13 + VK_CAPITAL = 0x14 + VK_KANA = 0x15 + VK_HANGEUL = 0x15 + VK_HANGUL = 0x15 + VK_JUNJA = 0x17 + VK_FINAL = 0x18 + VK_HANJA = 0x19 + VK_KANJI = 0x19 + VK_ESCAPE = 0x1B + VK_CONVERT = 
0x1C + VK_NONCONVERT = 0x1D + VK_ACCEPT = 0x1E + VK_MODECHANGE = 0x1F + VK_SPACE = 0x20 + VK_PRIOR = 0x21 + VK_NEXT = 0x22 + VK_END = 0x23 + VK_HOME = 0x24 + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_SELECT = 0x29 + VK_PRINT = 0x2A + VK_EXECUTE = 0x2B + VK_SNAPSHOT = 0x2C + VK_INSERT = 0x2D + VK_DELETE = 0x2E + VK_HELP = 0x2F + VK_LWIN = 0x5B + VK_RWIN = 0x5C + VK_APPS = 0x5D + VK_SLEEP = 0x5F + VK_NUMPAD0 = 0x60 + VK_NUMPAD1 = 0x61 + VK_NUMPAD2 = 0x62 + VK_NUMPAD3 = 0x63 + VK_NUMPAD4 = 0x64 + VK_NUMPAD5 = 0x65 + VK_NUMPAD6 = 0x66 + VK_NUMPAD7 = 0x67 + VK_NUMPAD8 = 0x68 + VK_NUMPAD9 = 0x69 + VK_MULTIPLY = 0x6A + VK_ADD = 0x6B + VK_SEPARATOR = 0x6C + VK_SUBTRACT = 0x6D + VK_DECIMAL = 0x6E + VK_DIVIDE = 0x6F + VK_F1 = 0x70 + VK_F2 = 0x71 + VK_F3 = 0x72 + VK_F4 = 0x73 + VK_F5 = 0x74 + VK_F6 = 0x75 + VK_F7 = 0x76 + VK_F8 = 0x77 + VK_F9 = 0x78 + VK_F10 = 0x79 + VK_F11 = 0x7A + VK_F12 = 0x7B + VK_F13 = 0x7C + VK_F14 = 0x7D + VK_F15 = 0x7E + VK_F16 = 0x7F + VK_F17 = 0x80 + VK_F18 = 0x81 + VK_F19 = 0x82 + VK_F20 = 0x83 + VK_F21 = 0x84 + VK_F22 = 0x85 + VK_F23 = 0x86 + VK_F24 = 0x87 + VK_NUMLOCK = 0x90 + VK_SCROLL = 0x91 + VK_OEM_NEC_EQUAL = 0x92 + VK_OEM_FJ_JISHO = 0x92 + VK_OEM_FJ_MASSHOU = 0x93 + VK_OEM_FJ_TOUROKU = 0x94 + VK_OEM_FJ_LOYA = 0x95 + VK_OEM_FJ_ROYA = 0x96 + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 + VK_LMENU = 0xA4 + VK_RMENU = 0xA5 + VK_BROWSER_BACK = 0xA6 + VK_BROWSER_FORWARD = 0xA7 + VK_BROWSER_REFRESH = 0xA8 + VK_BROWSER_STOP = 0xA9 + VK_BROWSER_SEARCH = 0xAA + VK_BROWSER_FAVORITES = 0xAB + VK_BROWSER_HOME = 0xAC + VK_VOLUME_MUTE = 0xAD + VK_VOLUME_DOWN = 0xAE + VK_VOLUME_UP = 0xAF + VK_MEDIA_NEXT_TRACK = 0xB0 + VK_MEDIA_PREV_TRACK = 0xB1 + VK_MEDIA_STOP = 0xB2 + VK_MEDIA_PLAY_PAUSE = 0xB3 + VK_LAUNCH_MAIL = 0xB4 + VK_LAUNCH_MEDIA_SELECT = 0xB5 + VK_LAUNCH_APP1 = 0xB6 + VK_LAUNCH_APP2 = 0xB7 + VK_OEM_1 = 0xBA + VK_OEM_PLUS = 0xBB + VK_OEM_COMMA = 0xBC + VK_OEM_MINUS = 0xBD + VK_OEM_PERIOD = 0xBE + VK_OEM_2 = 0xBF + VK_OEM_3 = 0xC0 + VK_OEM_4 = 0xDB + VK_OEM_5 = 0xDC + VK_OEM_6 = 0xDD + VK_OEM_7 = 0xDE + VK_OEM_8 = 0xDF + VK_OEM_AX = 0xE1 + VK_OEM_102 = 0xE2 + VK_ICO_HELP = 0xE3 + VK_ICO_00 = 0xE4 + VK_PROCESSKEY = 0xE5 + VK_ICO_CLEAR = 0xE6 + VK_OEM_RESET = 0xE9 + VK_OEM_JUMP = 0xEA + VK_OEM_PA1 = 0xEB + VK_OEM_PA2 = 0xEC + VK_OEM_PA3 = 0xED + VK_OEM_WSCTRL = 0xEE + VK_OEM_CUSEL = 0xEF + VK_OEM_ATTN = 0xF0 + VK_OEM_FINISH = 0xF1 + VK_OEM_COPY = 0xF2 + VK_OEM_AUTO = 0xF3 + VK_OEM_ENLW = 0xF4 + VK_OEM_BACKTAB = 0xF5 + VK_ATTN = 0xF6 + VK_CRSEL = 0xF7 + VK_EXSEL = 0xF8 + VK_EREOF = 0xF9 + VK_PLAY = 0xFA + VK_ZOOM = 0xFB + VK_NONAME = 0xFC + VK_PA1 = 0xFD + VK_OEM_CLEAR = 0xFE +) + +// Registry Value Types +const ( + REG_NONE = 0 + REG_SZ = 1 + REG_EXPAND_SZ = 2 + REG_BINARY = 3 + REG_DWORD = 4 + REG_DWORD_LITTLE_ENDIAN = 4 + REG_DWORD_BIG_ENDIAN = 5 + REG_LINK = 6 + REG_MULTI_SZ = 7 + REG_RESOURCE_LIST = 8 + REG_FULL_RESOURCE_DESCRIPTOR = 9 + REG_RESOURCE_REQUIREMENTS_LIST = 10 + REG_QWORD = 11 + REG_QWORD_LITTLE_ENDIAN = 11 +) + +// Tooltip styles +const ( + TTS_ALWAYSTIP = 0x01 + TTS_NOPREFIX = 0x02 + TTS_NOANIMATE = 0x10 + TTS_NOFADE = 0x20 + TTS_BALLOON = 0x40 + TTS_CLOSE = 0x80 + TTS_USEVISUALSTYLE = 0x100 +) + +// Tooltip messages +const ( + TTM_ACTIVATE = (WM_USER + 1) + TTM_SETDELAYTIME = (WM_USER + 3) + TTM_ADDTOOL = (WM_USER + 50) + TTM_DELTOOL = (WM_USER + 51) + TTM_NEWTOOLRECT = (WM_USER + 52) + TTM_RELAYEVENT = (WM_USER + 7) + TTM_GETTOOLINFO = (WM_USER + 53) + TTM_SETTOOLINFO = (WM_USER + 54) + 
TTM_HITTEST = (WM_USER + 55) + TTM_GETTEXT = (WM_USER + 56) + TTM_UPDATETIPTEXT = (WM_USER + 57) + TTM_GETTOOLCOUNT = (WM_USER + 13) + TTM_ENUMTOOLS = (WM_USER + 58) + TTM_GETCURRENTTOOL = (WM_USER + 59) + TTM_WINDOWFROMPOINT = (WM_USER + 16) + TTM_TRACKACTIVATE = (WM_USER + 17) + TTM_TRACKPOSITION = (WM_USER + 18) + TTM_SETTIPBKCOLOR = (WM_USER + 19) + TTM_SETTIPTEXTCOLOR = (WM_USER + 20) + TTM_GETDELAYTIME = (WM_USER + 21) + TTM_GETTIPBKCOLOR = (WM_USER + 22) + TTM_GETTIPTEXTCOLOR = (WM_USER + 23) + TTM_SETMAXTIPWIDTH = (WM_USER + 24) + TTM_GETMAXTIPWIDTH = (WM_USER + 25) + TTM_SETMARGIN = (WM_USER + 26) + TTM_GETMARGIN = (WM_USER + 27) + TTM_POP = (WM_USER + 28) + TTM_UPDATE = (WM_USER + 29) + TTM_GETBUBBLESIZE = (WM_USER + 30) + TTM_ADJUSTRECT = (WM_USER + 31) + TTM_SETTITLE = (WM_USER + 33) + TTM_POPUP = (WM_USER + 34) + TTM_GETTITLE = (WM_USER + 35) +) + +// Tooltip icons +const ( + TTI_NONE = 0 + TTI_INFO = 1 + TTI_WARNING = 2 + TTI_ERROR = 3 + TTI_INFO_LARGE = 4 + TTI_WARNING_LARGE = 5 + TTI_ERROR_LARGE = 6 +) + +// Tooltip notifications +const ( + TTN_FIRST = -520 + TTN_LAST = -549 + TTN_GETDISPINFO = (TTN_FIRST - 10) + TTN_SHOW = (TTN_FIRST - 1) + TTN_POP = (TTN_FIRST - 2) + TTN_LINKCLICK = (TTN_FIRST - 3) + TTN_NEEDTEXT = TTN_GETDISPINFO +) + +const ( + TTF_IDISHWND = 0x0001 + TTF_CENTERTIP = 0x0002 + TTF_RTLREADING = 0x0004 + TTF_SUBCLASS = 0x0010 + TTF_TRACK = 0x0020 + TTF_ABSOLUTE = 0x0080 + TTF_TRANSPARENT = 0x0100 + TTF_PARSELINKS = 0x1000 + TTF_DI_SETITEM = 0x8000 +) + +const ( + SWP_NOSIZE = 0x0001 + SWP_NOMOVE = 0x0002 + SWP_NOZORDER = 0x0004 + SWP_NOREDRAW = 0x0008 + SWP_NOACTIVATE = 0x0010 + SWP_FRAMECHANGED = 0x0020 + SWP_SHOWWINDOW = 0x0040 + SWP_HIDEWINDOW = 0x0080 + SWP_NOCOPYBITS = 0x0100 + SWP_NOOWNERZORDER = 0x0200 + SWP_NOSENDCHANGING = 0x0400 + SWP_DRAWFRAME = SWP_FRAMECHANGED + SWP_NOREPOSITION = SWP_NOOWNERZORDER + SWP_DEFERERASE = 0x2000 + SWP_ASYNCWINDOWPOS = 0x4000 +) + +// Predefined window handles +const ( + HWND_BROADCAST = HWND(0xFFFF) + HWND_BOTTOM = HWND(1) + HWND_NOTOPMOST = ^HWND(1) // -2 + HWND_TOP = HWND(0) + HWND_TOPMOST = ^HWND(0) // -1 + HWND_DESKTOP = HWND(0) + HWND_MESSAGE = ^HWND(2) // -3 +) + +// Pen types +const ( + PS_COSMETIC = 0x00000000 + PS_GEOMETRIC = 0x00010000 + PS_TYPE_MASK = 0x000F0000 +) + +// Pen styles +const ( + PS_SOLID = 0 + PS_DASH = 1 + PS_DOT = 2 + PS_DASHDOT = 3 + PS_DASHDOTDOT = 4 + PS_NULL = 5 + PS_INSIDEFRAME = 6 + PS_USERSTYLE = 7 + PS_ALTERNATE = 8 + PS_STYLE_MASK = 0x0000000F +) + +// Pen cap types +const ( + PS_ENDCAP_ROUND = 0x00000000 + PS_ENDCAP_SQUARE = 0x00000100 + PS_ENDCAP_FLAT = 0x00000200 + PS_ENDCAP_MASK = 0x00000F00 +) + +// Pen join types +const ( + PS_JOIN_ROUND = 0x00000000 + PS_JOIN_BEVEL = 0x00001000 + PS_JOIN_MITER = 0x00002000 + PS_JOIN_MASK = 0x0000F000 +) + +// Hatch styles +const ( + HS_HORIZONTAL = 0 + HS_VERTICAL = 1 + HS_FDIAGONAL = 2 + HS_BDIAGONAL = 3 + HS_CROSS = 4 + HS_DIAGCROSS = 5 +) + +// Stock Logical Objects +const ( + WHITE_BRUSH = 0 + LTGRAY_BRUSH = 1 + GRAY_BRUSH = 2 + DKGRAY_BRUSH = 3 + BLACK_BRUSH = 4 + NULL_BRUSH = 5 + HOLLOW_BRUSH = NULL_BRUSH + WHITE_PEN = 6 + BLACK_PEN = 7 + NULL_PEN = 8 + OEM_FIXED_FONT = 10 + ANSI_FIXED_FONT = 11 + ANSI_VAR_FONT = 12 + SYSTEM_FONT = 13 + DEVICE_DEFAULT_FONT = 14 + DEFAULT_PALETTE = 15 + SYSTEM_FIXED_FONT = 16 + DEFAULT_GUI_FONT = 17 + DC_BRUSH = 18 + DC_PEN = 19 +) + +// Brush styles +const ( + BS_SOLID = 0 + BS_NULL = 1 + BS_HOLLOW = BS_NULL + BS_HATCHED = 2 + BS_PATTERN = 3 + BS_INDEXED = 4 + BS_DIBPATTERN = 5 + BS_DIBPATTERNPT = 6 
+ BS_PATTERN8X8 = 7 + BS_DIBPATTERN8X8 = 8 + BS_MONOPATTERN = 9 +) + +// TRACKMOUSEEVENT flags +const ( + TME_HOVER = 0x00000001 + TME_LEAVE = 0x00000002 + TME_NONCLIENT = 0x00000010 + TME_QUERY = 0x40000000 + TME_CANCEL = 0x80000000 + + HOVER_DEFAULT = 0xFFFFFFFF +) + +// WM_NCHITTEST and MOUSEHOOKSTRUCT Mouse Position Codes +const ( + HTERROR = (-2) + HTTRANSPARENT = (-1) + HTNOWHERE = 0 + HTCLIENT = 1 + HTCAPTION = 2 + HTSYSMENU = 3 + HTGROWBOX = 4 + HTSIZE = HTGROWBOX + HTMENU = 5 + HTHSCROLL = 6 + HTVSCROLL = 7 + HTMINBUTTON = 8 + HTMAXBUTTON = 9 + HTLEFT = 10 + HTRIGHT = 11 + HTTOP = 12 + HTTOPLEFT = 13 + HTTOPRIGHT = 14 + HTBOTTOM = 15 + HTBOTTOMLEFT = 16 + HTBOTTOMRIGHT = 17 + HTBORDER = 18 + HTREDUCE = HTMINBUTTON + HTZOOM = HTMAXBUTTON + HTSIZEFIRST = HTLEFT + HTSIZELAST = HTBOTTOMRIGHT + HTOBJECT = 19 + HTCLOSE = 20 + HTHELP = 21 +) + +// DrawText[Ex] format flags +const ( + DT_TOP = 0x00000000 + DT_LEFT = 0x00000000 + DT_CENTER = 0x00000001 + DT_RIGHT = 0x00000002 + DT_VCENTER = 0x00000004 + DT_BOTTOM = 0x00000008 + DT_WORDBREAK = 0x00000010 + DT_SINGLELINE = 0x00000020 + DT_EXPANDTABS = 0x00000040 + DT_TABSTOP = 0x00000080 + DT_NOCLIP = 0x00000100 + DT_EXTERNALLEADING = 0x00000200 + DT_CALCRECT = 0x00000400 + DT_NOPREFIX = 0x00000800 + DT_INTERNAL = 0x00001000 + DT_EDITCONTROL = 0x00002000 + DT_PATH_ELLIPSIS = 0x00004000 + DT_END_ELLIPSIS = 0x00008000 + DT_MODIFYSTRING = 0x00010000 + DT_RTLREADING = 0x00020000 + DT_WORD_ELLIPSIS = 0x00040000 + DT_NOFULLWIDTHCHARBREAK = 0x00080000 + DT_HIDEPREFIX = 0x00100000 + DT_PREFIXONLY = 0x00200000 +) + +const CLR_INVALID = 0xFFFFFFFF + +// Background Modes +const ( + TRANSPARENT = 1 + OPAQUE = 2 + BKMODE_LAST = 2 +) + +// Global Memory Flags +const ( + GMEM_FIXED = 0x0000 + GMEM_MOVEABLE = 0x0002 + GMEM_NOCOMPACT = 0x0010 + GMEM_NODISCARD = 0x0020 + GMEM_ZEROINIT = 0x0040 + GMEM_MODIFY = 0x0080 + GMEM_DISCARDABLE = 0x0100 + GMEM_NOT_BANKED = 0x1000 + GMEM_SHARE = 0x2000 + GMEM_DDESHARE = 0x2000 + GMEM_NOTIFY = 0x4000 + GMEM_LOWER = GMEM_NOT_BANKED + GMEM_VALID_FLAGS = 0x7F72 + GMEM_INVALID_HANDLE = 0x8000 + GHND = (GMEM_MOVEABLE | GMEM_ZEROINIT) + GPTR = (GMEM_FIXED | GMEM_ZEROINIT) +) + +// Ternary raster operations +const ( + SRCCOPY = 0x00CC0020 + SRCPAINT = 0x00EE0086 + SRCAND = 0x008800C6 + SRCINVERT = 0x00660046 + SRCERASE = 0x00440328 + NOTSRCCOPY = 0x00330008 + NOTSRCERASE = 0x001100A6 + MERGECOPY = 0x00C000CA + MERGEPAINT = 0x00BB0226 + PATCOPY = 0x00F00021 + PATPAINT = 0x00FB0A09 + PATINVERT = 0x005A0049 + DSTINVERT = 0x00550009 + BLACKNESS = 0x00000042 + WHITENESS = 0x00FF0062 + NOMIRRORBITMAP = 0x80000000 + CAPTUREBLT = 0x40000000 +) + +// Clipboard formats +const ( + CF_TEXT = 1 + CF_BITMAP = 2 + CF_METAFILEPICT = 3 + CF_SYLK = 4 + CF_DIF = 5 + CF_TIFF = 6 + CF_OEMTEXT = 7 + CF_DIB = 8 + CF_PALETTE = 9 + CF_PENDATA = 10 + CF_RIFF = 11 + CF_WAVE = 12 + CF_UNICODETEXT = 13 + CF_ENHMETAFILE = 14 + CF_HDROP = 15 + CF_LOCALE = 16 + CF_DIBV5 = 17 + CF_MAX = 18 + CF_OWNERDISPLAY = 0x0080 + CF_DSPTEXT = 0x0081 + CF_DSPBITMAP = 0x0082 + CF_DSPMETAFILEPICT = 0x0083 + CF_DSPENHMETAFILE = 0x008E + CF_PRIVATEFIRST = 0x0200 + CF_PRIVATELAST = 0x02FF + CF_GDIOBJFIRST = 0x0300 + CF_GDIOBJLAST = 0x03FF +) + +// Bitmap compression formats +const ( + BI_RGB = 0 + BI_RLE8 = 1 + BI_RLE4 = 2 + BI_BITFIELDS = 3 + BI_JPEG = 4 + BI_PNG = 5 +) + +// SetDIBitsToDevice fuColorUse +const ( + DIB_PAL_COLORS = 1 + DIB_RGB_COLORS = 0 +) + +const ( + STANDARD_RIGHTS_REQUIRED = 0x000F +) + +// Service Control Manager object specific access types +const ( + 
SC_MANAGER_CONNECT = 0x0001 + SC_MANAGER_CREATE_SERVICE = 0x0002 + SC_MANAGER_ENUMERATE_SERVICE = 0x0004 + SC_MANAGER_LOCK = 0x0008 + SC_MANAGER_QUERY_LOCK_STATUS = 0x0010 + SC_MANAGER_MODIFY_BOOT_CONFIG = 0x0020 + SC_MANAGER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SC_MANAGER_CONNECT | SC_MANAGER_CREATE_SERVICE | SC_MANAGER_ENUMERATE_SERVICE | SC_MANAGER_LOCK | SC_MANAGER_QUERY_LOCK_STATUS | SC_MANAGER_MODIFY_BOOT_CONFIG +) + +// Service Types (Bit Mask) +const ( + SERVICE_KERNEL_DRIVER = 0x00000001 + SERVICE_FILE_SYSTEM_DRIVER = 0x00000002 + SERVICE_ADAPTER = 0x00000004 + SERVICE_RECOGNIZER_DRIVER = 0x00000008 + SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER + SERVICE_WIN32_OWN_PROCESS = 0x00000010 + SERVICE_WIN32_SHARE_PROCESS = 0x00000020 + SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS + SERVICE_INTERACTIVE_PROCESS = 0x00000100 + SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS +) + +// Service State -- for CurrentState +const ( + SERVICE_STOPPED = 0x00000001 + SERVICE_START_PENDING = 0x00000002 + SERVICE_STOP_PENDING = 0x00000003 + SERVICE_RUNNING = 0x00000004 + SERVICE_CONTINUE_PENDING = 0x00000005 + SERVICE_PAUSE_PENDING = 0x00000006 + SERVICE_PAUSED = 0x00000007 +) + +// Controls Accepted (Bit Mask) +const ( + SERVICE_ACCEPT_STOP = 0x00000001 + SERVICE_ACCEPT_PAUSE_CONTINUE = 0x00000002 + SERVICE_ACCEPT_SHUTDOWN = 0x00000004 + SERVICE_ACCEPT_PARAMCHANGE = 0x00000008 + SERVICE_ACCEPT_NETBINDCHANGE = 0x00000010 + SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 0x00000020 + SERVICE_ACCEPT_POWEREVENT = 0x00000040 + SERVICE_ACCEPT_SESSIONCHANGE = 0x00000080 + SERVICE_ACCEPT_PRESHUTDOWN = 0x00000100 + SERVICE_ACCEPT_TIMECHANGE = 0x00000200 + SERVICE_ACCEPT_TRIGGEREVENT = 0x00000400 +) + +// Service object specific access type +const ( + SERVICE_QUERY_CONFIG = 0x0001 + SERVICE_CHANGE_CONFIG = 0x0002 + SERVICE_QUERY_STATUS = 0x0004 + SERVICE_ENUMERATE_DEPENDENTS = 0x0008 + SERVICE_START = 0x0010 + SERVICE_STOP = 0x0020 + SERVICE_PAUSE_CONTINUE = 0x0040 + SERVICE_INTERROGATE = 0x0080 + SERVICE_USER_DEFINED_CONTROL = 0x0100 + + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | + SERVICE_QUERY_CONFIG | + SERVICE_CHANGE_CONFIG | + SERVICE_QUERY_STATUS | + SERVICE_ENUMERATE_DEPENDENTS | + SERVICE_START | + SERVICE_STOP | + SERVICE_PAUSE_CONTINUE | + SERVICE_INTERROGATE | + SERVICE_USER_DEFINED_CONTROL +) + +// MapVirtualKey maptypes +const ( + MAPVK_VK_TO_CHAR = 2 + MAPVK_VK_TO_VSC = 0 + MAPVK_VSC_TO_VK = 1 + MAPVK_VSC_TO_VK_EX = 3 +) + +// ReadEventLog Flags +const ( + EVENTLOG_SEEK_READ = 0x0002 + EVENTLOG_SEQUENTIAL_READ = 0x0001 + EVENTLOG_FORWARDS_READ = 0x0004 + EVENTLOG_BACKWARDS_READ = 0x0008 +) + +// CreateToolhelp32Snapshot flags +const ( + TH32CS_SNAPHEAPLIST = 0x00000001 + TH32CS_SNAPPROCESS = 0x00000002 + TH32CS_SNAPTHREAD = 0x00000004 + TH32CS_SNAPMODULE = 0x00000008 + TH32CS_SNAPMODULE32 = 0x00000010 + TH32CS_INHERIT = 0x80000000 + TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD +) + +const ( + MAX_MODULE_NAME32 = 255 + MAX_PATH = 260 +) + +const ( + FOREGROUND_BLUE = 0x0001 + FOREGROUND_GREEN = 0x0002 + FOREGROUND_RED = 0x0004 + FOREGROUND_INTENSITY = 0x0008 + BACKGROUND_BLUE = 0x0010 + BACKGROUND_GREEN = 0x0020 + BACKGROUND_RED = 0x0040 + BACKGROUND_INTENSITY = 0x0080 + COMMON_LVB_LEADING_BYTE = 0x0100 + COMMON_LVB_TRAILING_BYTE = 0x0200 + COMMON_LVB_GRID_HORIZONTAL = 0x0400 + COMMON_LVB_GRID_LVERTICAL = 0x0800 + 
COMMON_LVB_GRID_RVERTICAL = 0x1000 + COMMON_LVB_REVERSE_VIDEO = 0x4000 + COMMON_LVB_UNDERSCORE = 0x8000 +) + +// Flags used by the DWM_BLURBEHIND structure to indicate +// which of its members contain valid information. +const ( + DWM_BB_ENABLE = 0x00000001 // A value for the fEnable member has been specified. + DWM_BB_BLURREGION = 0x00000002 // A value for the hRgnBlur member has been specified. + DWM_BB_TRANSITIONONMAXIMIZED = 0x00000004 // A value for the fTransitionOnMaximized member has been specified. +) + +// Flags used by the DwmEnableComposition function +// to change the state of Desktop Window Manager (DWM) composition. +const ( + DWM_EC_DISABLECOMPOSITION = 0 // Disable composition + DWM_EC_ENABLECOMPOSITION = 1 // Enable composition +) + +// enum-lite implementation for the following constant structure +type DWM_SHOWCONTACT int32 + +const ( + DWMSC_DOWN = 0x00000001 + DWMSC_UP = 0x00000002 + DWMSC_DRAG = 0x00000004 + DWMSC_HOLD = 0x00000008 + DWMSC_PENBARREL = 0x00000010 + DWMSC_NONE = 0x00000000 + DWMSC_ALL = 0xFFFFFFFF +) + +// enum-lite implementation for the following constant structure +type DWM_SOURCE_FRAME_SAMPLING int32 + +// TODO: need to verify this construction +// Flags used by the DwmSetPresentParameters function +// to specify the frame sampling type +const ( + DWM_SOURCE_FRAME_SAMPLING_POINT = iota + 1 + DWM_SOURCE_FRAME_SAMPLING_COVERAGE + DWM_SOURCE_FRAME_SAMPLING_LAST +) + +// Flags used by the DWM_THUMBNAIL_PROPERTIES structure to +// indicate which of its members contain valid information. +const ( + DWM_TNP_RECTDESTINATION = 0x00000001 // A value for the rcDestination member has been specified + DWM_TNP_RECTSOURCE = 0x00000002 // A value for the rcSource member has been specified + DWM_TNP_OPACITY = 0x00000004 // A value for the opacity member has been specified + DWM_TNP_VISIBLE = 0x00000008 // A value for the fVisible member has been specified + DWM_TNP_SOURCECLIENTAREAONLY = 0x00000010 // A value for the fSourceClientAreaOnly member has been specified +) + +// enum-lite implementation for the following constant structure +type DWMFLIP3DWINDOWPOLICY int32 + +// TODO: need to verify this construction +// Flags used by the DwmSetWindowAttribute function +// to specify the Flip3D window policy +const ( + DWMFLIP3D_DEFAULT = iota + 1 + DWMFLIP3D_EXCLUDEBELOW + DWMFLIP3D_EXCLUDEABOVE + DWMFLIP3D_LAST +) + +// enum-lite implementation for the following constant structure +type DWMNCRENDERINGPOLICY int32 + +// TODO: need to verify this construction +// Flags used by the DwmSetWindowAttribute function +// to specify the non-client area rendering policy +const ( + DWMNCRP_USEWINDOWSTYLE = iota + 1 + DWMNCRP_DISABLED + DWMNCRP_ENABLED + DWMNCRP_LAST +) + +// enum-lite implementation for the following constant structure +type DWMTRANSITION_OWNEDWINDOW_TARGET int32 + +const ( + DWMTRANSITION_OWNEDWINDOW_NULL = -1 + DWMTRANSITION_OWNEDWINDOW_REPOSITION = 0 +) + +// enum-lite implementation for the following constant structure +type DWMWINDOWATTRIBUTE int32 + +// TODO: need to verify this construction +// Flags used by the DwmGetWindowAttribute and DwmSetWindowAttribute functions +// to specify window attributes for non-client rendering +const ( + DWMWA_NCRENDERING_ENABLED = iota + 1 + DWMWA_NCRENDERING_POLICY + DWMWA_TRANSITIONS_FORCEDISABLED + DWMWA_ALLOW_NCPAINT + DWMWA_CAPTION_BUTTON_BOUNDS + DWMWA_NONCLIENT_RTL_LAYOUT + DWMWA_FORCE_ICONIC_REPRESENTATION + DWMWA_FLIP3D_POLICY + DWMWA_EXTENDED_FRAME_BOUNDS + DWMWA_HAS_ICONIC_BITMAP + DWMWA_DISALLOW_PEEK + 
DWMWA_EXCLUDED_FROM_PEEK + DWMWA_CLOAK + DWMWA_CLOAKED + DWMWA_FREEZE_REPRESENTATION + DWMWA_LAST +) + +// enum-lite implementation for the following constant structure +type GESTURE_TYPE int32 + +// TODO: use iota? +// Identifies the gesture type +const ( + GT_PEN_TAP = 0 + GT_PEN_DOUBLETAP = 1 + GT_PEN_RIGHTTAP = 2 + GT_PEN_PRESSANDHOLD = 3 + GT_PEN_PRESSANDHOLDABORT = 4 + GT_TOUCH_TAP = 5 + GT_TOUCH_DOUBLETAP = 6 + GT_TOUCH_RIGHTTAP = 7 + GT_TOUCH_PRESSANDHOLD = 8 + GT_TOUCH_PRESSANDHOLDABORT = 9 + GT_TOUCH_PRESSANDTAP = 10 +) + +// Icons +const ( + ICON_SMALL = 0 + ICON_BIG = 1 + ICON_SMALL2 = 2 +) + +const ( + SIZE_RESTORED = 0 + SIZE_MINIMIZED = 1 + SIZE_MAXIMIZED = 2 + SIZE_MAXSHOW = 3 + SIZE_MAXHIDE = 4 +) + +// XButton values +const ( + XBUTTON1 = 1 + XBUTTON2 = 2 +) + +// Devmode +const ( + DM_SPECVERSION = 0x0401 + + DM_ORIENTATION = 0x00000001 + DM_PAPERSIZE = 0x00000002 + DM_PAPERLENGTH = 0x00000004 + DM_PAPERWIDTH = 0x00000008 + DM_SCALE = 0x00000010 + DM_POSITION = 0x00000020 + DM_NUP = 0x00000040 + DM_DISPLAYORIENTATION = 0x00000080 + DM_COPIES = 0x00000100 + DM_DEFAULTSOURCE = 0x00000200 + DM_PRINTQUALITY = 0x00000400 + DM_COLOR = 0x00000800 + DM_DUPLEX = 0x00001000 + DM_YRESOLUTION = 0x00002000 + DM_TTOPTION = 0x00004000 + DM_COLLATE = 0x00008000 + DM_FORMNAME = 0x00010000 + DM_LOGPIXELS = 0x00020000 + DM_BITSPERPEL = 0x00040000 + DM_PELSWIDTH = 0x00080000 + DM_PELSHEIGHT = 0x00100000 + DM_DISPLAYFLAGS = 0x00200000 + DM_DISPLAYFREQUENCY = 0x00400000 + DM_ICMMETHOD = 0x00800000 + DM_ICMINTENT = 0x01000000 + DM_MEDIATYPE = 0x02000000 + DM_DITHERTYPE = 0x04000000 + DM_PANNINGWIDTH = 0x08000000 + DM_PANNINGHEIGHT = 0x10000000 + DM_DISPLAYFIXEDOUTPUT = 0x20000000 +) + +// ChangeDisplaySettings +const ( + CDS_UPDATEREGISTRY = 0x00000001 + CDS_TEST = 0x00000002 + CDS_FULLSCREEN = 0x00000004 + CDS_GLOBAL = 0x00000008 + CDS_SET_PRIMARY = 0x00000010 + CDS_VIDEOPARAMETERS = 0x00000020 + CDS_RESET = 0x40000000 + CDS_NORESET = 0x10000000 + + DISP_CHANGE_SUCCESSFUL = 0 + DISP_CHANGE_RESTART = 1 + DISP_CHANGE_FAILED = -1 + DISP_CHANGE_BADMODE = -2 + DISP_CHANGE_NOTUPDATED = -3 + DISP_CHANGE_BADFLAGS = -4 + DISP_CHANGE_BADPARAM = -5 + DISP_CHANGE_BADDUALVIEW = -6 +) + +const ( + ENUM_CURRENT_SETTINGS = 0xFFFFFFFF + ENUM_REGISTRY_SETTINGS = 0xFFFFFFFE +) + +// PIXELFORMATDESCRIPTOR +const ( + PFD_TYPE_RGBA = 0 + PFD_TYPE_COLORINDEX = 1 + + PFD_MAIN_PLANE = 0 + PFD_OVERLAY_PLANE = 1 + PFD_UNDERLAY_PLANE = -1 + + PFD_DOUBLEBUFFER = 0x00000001 + PFD_STEREO = 0x00000002 + PFD_DRAW_TO_WINDOW = 0x00000004 + PFD_DRAW_TO_BITMAP = 0x00000008 + PFD_SUPPORT_GDI = 0x00000010 + PFD_SUPPORT_OPENGL = 0x00000020 + PFD_GENERIC_FORMAT = 0x00000040 + PFD_NEED_PALETTE = 0x00000080 + PFD_NEED_SYSTEM_PALETTE = 0x00000100 + PFD_SWAP_EXCHANGE = 0x00000200 + PFD_SWAP_COPY = 0x00000400 + PFD_SWAP_LAYER_BUFFERS = 0x00000800 + PFD_GENERIC_ACCELERATED = 0x00001000 + PFD_SUPPORT_DIRECTDRAW = 0x00002000 + PFD_DIRECT3D_ACCELERATED = 0x00004000 + PFD_SUPPORT_COMPOSITION = 0x00008000 + + PFD_DEPTH_DONTCARE = 0x20000000 + PFD_DOUBLEBUFFER_DONTCARE = 0x40000000 + PFD_STEREO_DONTCARE = 0x80000000 +) + +const ( + INPUT_MOUSE = 0 + INPUT_KEYBOARD = 1 + INPUT_HARDWARE = 2 +) + +const ( + MOUSEEVENTF_ABSOLUTE = 0x8000 + MOUSEEVENTF_HWHEEL = 0x01000 + MOUSEEVENTF_MOVE = 0x0001 + MOUSEEVENTF_MOVE_NOCOALESCE = 0x2000 + MOUSEEVENTF_LEFTDOWN = 0x0002 + MOUSEEVENTF_LEFTUP = 0x0004 + MOUSEEVENTF_RIGHTDOWN = 0x0008 + MOUSEEVENTF_RIGHTUP = 0x0010 + MOUSEEVENTF_MIDDLEDOWN = 0x0020 + MOUSEEVENTF_MIDDLEUP = 0x0040 + MOUSEEVENTF_VIRTUALDESK 
= 0x4000 + MOUSEEVENTF_WHEEL = 0x0800 + MOUSEEVENTF_XDOWN = 0x0080 + MOUSEEVENTF_XUP = 0x0100 +) diff --git a/vendor/github.com/shirou/w32/dwmapi.go b/vendor/github.com/shirou/w32/dwmapi.go new file mode 100644 index 0000000..139b937 --- /dev/null +++ b/vendor/github.com/shirou/w32/dwmapi.go @@ -0,0 +1,256 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "fmt" + "syscall" + "unsafe" +) + +// DEFINED IN THE DWM API BUT NOT IMPLEMENTED BY MS: +// DwmAttachMilContent +// DwmDetachMilContent +// DwmEnableComposition +// DwmGetGraphicsStreamClient +// DwmGetGraphicsStreamTransformHint + +var ( + moddwmapi = syscall.NewLazyDLL("dwmapi.dll") + + procDwmDefWindowProc = moddwmapi.NewProc("DwmDefWindowProc") + procDwmEnableBlurBehindWindow = moddwmapi.NewProc("DwmEnableBlurBehindWindow") + procDwmEnableMMCSS = moddwmapi.NewProc("DwmEnableMMCSS") + procDwmExtendFrameIntoClientArea = moddwmapi.NewProc("DwmExtendFrameIntoClientArea") + procDwmFlush = moddwmapi.NewProc("DwmFlush") + procDwmGetColorizationColor = moddwmapi.NewProc("DwmGetColorizationColor") + procDwmGetCompositionTimingInfo = moddwmapi.NewProc("DwmGetCompositionTimingInfo") + procDwmGetTransportAttributes = moddwmapi.NewProc("DwmGetTransportAttributes") + procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") + procDwmInvalidateIconicBitmaps = moddwmapi.NewProc("DwmInvalidateIconicBitmaps") + procDwmIsCompositionEnabled = moddwmapi.NewProc("DwmIsCompositionEnabled") + procDwmModifyPreviousDxFrameDuration = moddwmapi.NewProc("DwmModifyPreviousDxFrameDuration") + procDwmQueryThumbnailSourceSize = moddwmapi.NewProc("DwmQueryThumbnailSourceSize") + procDwmRegisterThumbnail = moddwmapi.NewProc("DwmRegisterThumbnail") + procDwmRenderGesture = moddwmapi.NewProc("DwmRenderGesture") + procDwmSetDxFrameDuration = moddwmapi.NewProc("DwmSetDxFrameDuration") + procDwmSetIconicLivePreviewBitmap = moddwmapi.NewProc("DwmSetIconicLivePreviewBitmap") + procDwmSetIconicThumbnail = moddwmapi.NewProc("DwmSetIconicThumbnail") + procDwmSetPresentParameters = moddwmapi.NewProc("DwmSetPresentParameters") + procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procDwmShowContact = moddwmapi.NewProc("DwmShowContact") + procDwmTetherContact = moddwmapi.NewProc("DwmTetherContact") + procDwmTransitionOwnedWindow = moddwmapi.NewProc("DwmTransitionOwnedWindow") + procDwmUnregisterThumbnail = moddwmapi.NewProc("DwmUnregisterThumbnail") + procDwmUpdateThumbnailProperties = moddwmapi.NewProc("DwmUpdateThumbnailProperties") +) + +func DwmDefWindowProc(hWnd HWND, msg uint, wParam, lParam uintptr) (bool, uint) { + var result uint + ret, _, _ := procDwmDefWindowProc.Call( + uintptr(hWnd), + uintptr(msg), + wParam, + lParam, + uintptr(unsafe.Pointer(&result))) + return ret != 0, result +} + +func DwmEnableBlurBehindWindow(hWnd HWND, pBlurBehind *DWM_BLURBEHIND) HRESULT { + ret, _, _ := procDwmEnableBlurBehindWindow.Call( + uintptr(hWnd), + uintptr(unsafe.Pointer(pBlurBehind))) + return HRESULT(ret) +} + +func DwmEnableMMCSS(fEnableMMCSS bool) HRESULT { + ret, _, _ := procDwmEnableMMCSS.Call( + uintptr(BoolToBOOL(fEnableMMCSS))) + return HRESULT(ret) +} + +func DwmExtendFrameIntoClientArea(hWnd HWND, pMarInset *MARGINS) HRESULT { + ret, _, _ := procDwmExtendFrameIntoClientArea.Call( + uintptr(hWnd), + uintptr(unsafe.Pointer(pMarInset))) + return HRESULT(ret) +} + +func 
DwmFlush() HRESULT {
+	ret, _, _ := procDwmFlush.Call()
+	return HRESULT(ret)
+}
+
+func DwmGetColorizationColor(pcrColorization *uint32, pfOpaqueBlend *BOOL) HRESULT {
+	ret, _, _ := procDwmGetColorizationColor.Call(
+		uintptr(unsafe.Pointer(pcrColorization)),
+		uintptr(unsafe.Pointer(pfOpaqueBlend)))
+	return HRESULT(ret)
+}
+
+func DwmGetCompositionTimingInfo(hWnd HWND, pTimingInfo *DWM_TIMING_INFO) HRESULT {
+	ret, _, _ := procDwmGetCompositionTimingInfo.Call(
+		uintptr(hWnd),
+		uintptr(unsafe.Pointer(pTimingInfo)))
+	return HRESULT(ret)
+}
+
+func DwmGetTransportAttributes(pfIsRemoting *BOOL, pfIsConnected *BOOL, pDwGeneration *uint32) HRESULT {
+	ret, _, _ := procDwmGetTransportAttributes.Call(
+		uintptr(unsafe.Pointer(pfIsRemoting)),
+		uintptr(unsafe.Pointer(pfIsConnected)),
+		uintptr(unsafe.Pointer(pDwGeneration)))
+	return HRESULT(ret)
+}
+
+// TODO: verify handling of variable arguments
+func DwmGetWindowAttribute(hWnd HWND, dwAttribute uint32) (pAttribute interface{}, result HRESULT) {
+	var pvAttribute, pvAttrSize uintptr
+	switch dwAttribute {
+	case DWMWA_NCRENDERING_ENABLED:
+		v := new(BOOL)
+		pAttribute = v
+		pvAttribute = uintptr(unsafe.Pointer(v))
+		pvAttrSize = unsafe.Sizeof(*v)
+	case DWMWA_CAPTION_BUTTON_BOUNDS, DWMWA_EXTENDED_FRAME_BOUNDS:
+		v := new(RECT)
+		pAttribute = v
+		pvAttribute = uintptr(unsafe.Pointer(v))
+		pvAttrSize = unsafe.Sizeof(*v)
+	case DWMWA_CLOAKED:
+		panic(fmt.Sprintf("DwmGetWindowAttribute(%d) is not currently supported.", dwAttribute))
+	default:
+		panic(fmt.Sprintf("DwmGetWindowAttribute(%d) is not valid.", dwAttribute))
+	}
+
+	ret, _, _ := procDwmGetWindowAttribute.Call(
+		uintptr(hWnd),
+		uintptr(dwAttribute),
+		pvAttribute,
+		pvAttrSize)
+	result = HRESULT(ret)
+	return
+}
+
+func DwmInvalidateIconicBitmaps(hWnd HWND) HRESULT {
+	ret, _, _ := procDwmInvalidateIconicBitmaps.Call(
+		uintptr(hWnd))
+	return HRESULT(ret)
+}
+
+func DwmIsCompositionEnabled(pfEnabled *BOOL) HRESULT {
+	ret, _, _ := procDwmIsCompositionEnabled.Call(
+		uintptr(unsafe.Pointer(pfEnabled)))
+	return HRESULT(ret)
+}
+
+func DwmModifyPreviousDxFrameDuration(hWnd HWND, cRefreshes int, fRelative bool) HRESULT {
+	ret, _, _ := procDwmModifyPreviousDxFrameDuration.Call(
+		uintptr(hWnd),
+		uintptr(cRefreshes),
+		uintptr(BoolToBOOL(fRelative)))
+	return HRESULT(ret)
+}
+
+func DwmQueryThumbnailSourceSize(hThumbnail HTHUMBNAIL, pSize *SIZE) HRESULT {
+	ret, _, _ := procDwmQueryThumbnailSourceSize.Call(
+		uintptr(hThumbnail),
+		uintptr(unsafe.Pointer(pSize)))
+	return HRESULT(ret)
+}
+
+func DwmRegisterThumbnail(hWndDestination HWND, hWndSource HWND, phThumbnailId *HTHUMBNAIL) HRESULT {
+	ret, _, _ := procDwmRegisterThumbnail.Call(
+		uintptr(hWndDestination),
+		uintptr(hWndSource),
+		uintptr(unsafe.Pointer(phThumbnailId)))
+	return HRESULT(ret)
+}
+
+func DwmRenderGesture(gt GESTURE_TYPE, cContacts uint, pdwPointerID *uint32, pPoints *POINT) {
+	procDwmRenderGesture.Call(
+		uintptr(gt),
+		uintptr(cContacts),
+		uintptr(unsafe.Pointer(pdwPointerID)),
+		uintptr(unsafe.Pointer(pPoints)))
+	return
+}
+
+func DwmSetDxFrameDuration(hWnd HWND, cRefreshes int) HRESULT {
+	ret, _, _ := procDwmSetDxFrameDuration.Call(
+		uintptr(hWnd),
+		uintptr(cRefreshes))
+	return HRESULT(ret)
+}
+
+func DwmSetIconicLivePreviewBitmap(hWnd HWND, hbmp HBITMAP, pptClient *POINT, dwSITFlags uint32) HRESULT {
+	ret, _, _ := procDwmSetIconicLivePreviewBitmap.Call(
+		uintptr(hWnd),
+		uintptr(hbmp),
+		uintptr(unsafe.Pointer(pptClient)),
+		uintptr(dwSITFlags))
+	return HRESULT(ret)
+}
+
+func DwmSetIconicThumbnail(hWnd HWND, hbmp HBITMAP, dwSITFlags uint32) HRESULT {
+	ret, _, _ := procDwmSetIconicThumbnail.Call(
+		uintptr(hWnd),
+		uintptr(hbmp),
+		uintptr(dwSITFlags))
+	return HRESULT(ret)
+}
+
+func DwmSetPresentParameters(hWnd HWND, pPresentParams *DWM_PRESENT_PARAMETERS) HRESULT {
+	ret, _, _ := procDwmSetPresentParameters.Call(
+		uintptr(hWnd),
+		uintptr(unsafe.Pointer(pPresentParams)))
+	return HRESULT(ret)
+}
+
+func DwmSetWindowAttribute(hWnd HWND, dwAttribute uint32, pvAttribute LPCVOID, cbAttribute uint32) HRESULT {
+	ret, _, _ := procDwmSetWindowAttribute.Call(
+		uintptr(hWnd),
+		uintptr(dwAttribute),
+		uintptr(pvAttribute),
+		uintptr(cbAttribute))
+	return HRESULT(ret)
+}
+
+func DwmShowContact(dwPointerID uint32, eShowContact DWM_SHOWCONTACT) {
+	procDwmShowContact.Call(
+		uintptr(dwPointerID),
+		uintptr(eShowContact))
+	return
+}
+
+func DwmTetherContact(dwPointerID uint32, fEnable bool, ptTether POINT) {
+	procDwmTetherContact.Call(
+		uintptr(dwPointerID),
+		uintptr(BoolToBOOL(fEnable)),
+		uintptr(unsafe.Pointer(&ptTether)))
+	return
+}
+
+func DwmTransitionOwnedWindow(hWnd HWND, target DWMTRANSITION_OWNEDWINDOW_TARGET) {
+	procDwmTransitionOwnedWindow.Call(
+		uintptr(hWnd),
+		uintptr(target))
+	return
+}
+
+func DwmUnregisterThumbnail(hThumbnailId HTHUMBNAIL) HRESULT {
+	ret, _, _ := procDwmUnregisterThumbnail.Call(
+		uintptr(hThumbnailId))
+	return HRESULT(ret)
+}
+
+func DwmUpdateThumbnailProperties(hThumbnailId HTHUMBNAIL, ptnProperties *DWM_THUMBNAIL_PROPERTIES) HRESULT {
+	ret, _, _ := procDwmUpdateThumbnailProperties.Call(
+		uintptr(hThumbnailId),
+		uintptr(unsafe.Pointer(ptnProperties)))
+	return HRESULT(ret)
+}
diff --git a/vendor/github.com/shirou/w32/gdi32.go b/vendor/github.com/shirou/w32/gdi32.go
new file mode 100644
index 0000000..34f032c
--- /dev/null
+++ b/vendor/github.com/shirou/w32/gdi32.go
@@ -0,0 +1,511 @@
+// Copyright 2010-2012 The W32 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modgdi32 = syscall.NewLazyDLL("gdi32.dll") + + procGetDeviceCaps = modgdi32.NewProc("GetDeviceCaps") + procDeleteObject = modgdi32.NewProc("DeleteObject") + procCreateFontIndirect = modgdi32.NewProc("CreateFontIndirectW") + procAbortDoc = modgdi32.NewProc("AbortDoc") + procBitBlt = modgdi32.NewProc("BitBlt") + procCloseEnhMetaFile = modgdi32.NewProc("CloseEnhMetaFile") + procCopyEnhMetaFile = modgdi32.NewProc("CopyEnhMetaFileW") + procCreateBrushIndirect = modgdi32.NewProc("CreateBrushIndirect") + procCreateCompatibleDC = modgdi32.NewProc("CreateCompatibleDC") + procCreateDC = modgdi32.NewProc("CreateDCW") + procCreateDIBSection = modgdi32.NewProc("CreateDIBSection") + procCreateEnhMetaFile = modgdi32.NewProc("CreateEnhMetaFileW") + procCreateIC = modgdi32.NewProc("CreateICW") + procDeleteDC = modgdi32.NewProc("DeleteDC") + procDeleteEnhMetaFile = modgdi32.NewProc("DeleteEnhMetaFile") + procEllipse = modgdi32.NewProc("Ellipse") + procEndDoc = modgdi32.NewProc("EndDoc") + procEndPage = modgdi32.NewProc("EndPage") + procExtCreatePen = modgdi32.NewProc("ExtCreatePen") + procGetEnhMetaFile = modgdi32.NewProc("GetEnhMetaFileW") + procGetEnhMetaFileHeader = modgdi32.NewProc("GetEnhMetaFileHeader") + procGetObject = modgdi32.NewProc("GetObjectW") + procGetStockObject = modgdi32.NewProc("GetStockObject") + procGetTextExtentExPoint = modgdi32.NewProc("GetTextExtentExPointW") + procGetTextExtentPoint32 = modgdi32.NewProc("GetTextExtentPoint32W") + procGetTextMetrics = modgdi32.NewProc("GetTextMetricsW") + procLineTo = modgdi32.NewProc("LineTo") + procMoveToEx = modgdi32.NewProc("MoveToEx") + procPlayEnhMetaFile = modgdi32.NewProc("PlayEnhMetaFile") + procRectangle = modgdi32.NewProc("Rectangle") + procResetDC = modgdi32.NewProc("ResetDCW") + procSelectObject = modgdi32.NewProc("SelectObject") + procSetBkMode = modgdi32.NewProc("SetBkMode") + procSetBrushOrgEx = modgdi32.NewProc("SetBrushOrgEx") + procSetStretchBltMode = modgdi32.NewProc("SetStretchBltMode") + procSetTextColor = modgdi32.NewProc("SetTextColor") + procSetBkColor = modgdi32.NewProc("SetBkColor") + procStartDoc = modgdi32.NewProc("StartDocW") + procStartPage = modgdi32.NewProc("StartPage") + procStretchBlt = modgdi32.NewProc("StretchBlt") + procSetDIBitsToDevice = modgdi32.NewProc("SetDIBitsToDevice") + procChoosePixelFormat = modgdi32.NewProc("ChoosePixelFormat") + procDescribePixelFormat = modgdi32.NewProc("DescribePixelFormat") + procGetEnhMetaFilePixelFormat = modgdi32.NewProc("GetEnhMetaFilePixelFormat") + procGetPixelFormat = modgdi32.NewProc("GetPixelFormat") + procSetPixelFormat = modgdi32.NewProc("SetPixelFormat") + procSwapBuffers = modgdi32.NewProc("SwapBuffers") +) + +func GetDeviceCaps(hdc HDC, index int) int { + ret, _, _ := procGetDeviceCaps.Call( + uintptr(hdc), + uintptr(index)) + + return int(ret) +} + +func DeleteObject(hObject HGDIOBJ) bool { + ret, _, _ := procDeleteObject.Call( + uintptr(hObject)) + + return ret != 0 +} + +func CreateFontIndirect(logFont *LOGFONT) HFONT { + ret, _, _ := procCreateFontIndirect.Call( + uintptr(unsafe.Pointer(logFont))) + + return HFONT(ret) +} + +func AbortDoc(hdc HDC) int { + ret, _, _ := procAbortDoc.Call( + uintptr(hdc)) + + return int(ret) +} + +func BitBlt(hdcDest HDC, nXDest, nYDest, nWidth, nHeight int, hdcSrc HDC, nXSrc, nYSrc int, dwRop uint) { + ret, _, _ := procBitBlt.Call( + uintptr(hdcDest), + uintptr(nXDest), + uintptr(nYDest), + uintptr(nWidth), + uintptr(nHeight), + uintptr(hdcSrc), + 
uintptr(nXSrc), + uintptr(nYSrc), + uintptr(dwRop)) + + if ret == 0 { + panic("BitBlt failed") + } +} + +func CloseEnhMetaFile(hdc HDC) HENHMETAFILE { + ret, _, _ := procCloseEnhMetaFile.Call( + uintptr(hdc)) + + return HENHMETAFILE(ret) +} + +func CopyEnhMetaFile(hemfSrc HENHMETAFILE, lpszFile *uint16) HENHMETAFILE { + ret, _, _ := procCopyEnhMetaFile.Call( + uintptr(hemfSrc), + uintptr(unsafe.Pointer(lpszFile))) + + return HENHMETAFILE(ret) +} + +func CreateBrushIndirect(lplb *LOGBRUSH) HBRUSH { + ret, _, _ := procCreateBrushIndirect.Call( + uintptr(unsafe.Pointer(lplb))) + + return HBRUSH(ret) +} + +func CreateCompatibleDC(hdc HDC) HDC { + ret, _, _ := procCreateCompatibleDC.Call( + uintptr(hdc)) + + if ret == 0 { + panic("Create compatible DC failed") + } + + return HDC(ret) +} + +func CreateDC(lpszDriver, lpszDevice, lpszOutput *uint16, lpInitData *DEVMODE) HDC { + ret, _, _ := procCreateDC.Call( + uintptr(unsafe.Pointer(lpszDriver)), + uintptr(unsafe.Pointer(lpszDevice)), + uintptr(unsafe.Pointer(lpszOutput)), + uintptr(unsafe.Pointer(lpInitData))) + + return HDC(ret) +} + +func CreateDIBSection(hdc HDC, pbmi *BITMAPINFO, iUsage uint, ppvBits *unsafe.Pointer, hSection HANDLE, dwOffset uint) HBITMAP { + ret, _, _ := procCreateDIBSection.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(pbmi)), + uintptr(iUsage), + uintptr(unsafe.Pointer(ppvBits)), + uintptr(hSection), + uintptr(dwOffset)) + + return HBITMAP(ret) +} + +func CreateEnhMetaFile(hdcRef HDC, lpFilename *uint16, lpRect *RECT, lpDescription *uint16) HDC { + ret, _, _ := procCreateEnhMetaFile.Call( + uintptr(hdcRef), + uintptr(unsafe.Pointer(lpFilename)), + uintptr(unsafe.Pointer(lpRect)), + uintptr(unsafe.Pointer(lpDescription))) + + return HDC(ret) +} + +func CreateIC(lpszDriver, lpszDevice, lpszOutput *uint16, lpdvmInit *DEVMODE) HDC { + ret, _, _ := procCreateIC.Call( + uintptr(unsafe.Pointer(lpszDriver)), + uintptr(unsafe.Pointer(lpszDevice)), + uintptr(unsafe.Pointer(lpszOutput)), + uintptr(unsafe.Pointer(lpdvmInit))) + + return HDC(ret) +} + +func DeleteDC(hdc HDC) bool { + ret, _, _ := procDeleteDC.Call( + uintptr(hdc)) + + return ret != 0 +} + +func DeleteEnhMetaFile(hemf HENHMETAFILE) bool { + ret, _, _ := procDeleteEnhMetaFile.Call( + uintptr(hemf)) + + return ret != 0 +} + +func Ellipse(hdc HDC, nLeftRect, nTopRect, nRightRect, nBottomRect int) bool { + ret, _, _ := procEllipse.Call( + uintptr(hdc), + uintptr(nLeftRect), + uintptr(nTopRect), + uintptr(nRightRect), + uintptr(nBottomRect)) + + return ret != 0 +} + +func EndDoc(hdc HDC) int { + ret, _, _ := procEndDoc.Call( + uintptr(hdc)) + + return int(ret) +} + +func EndPage(hdc HDC) int { + ret, _, _ := procEndPage.Call( + uintptr(hdc)) + + return int(ret) +} + +func ExtCreatePen(dwPenStyle, dwWidth uint, lplb *LOGBRUSH, dwStyleCount uint, lpStyle *uint) HPEN { + ret, _, _ := procExtCreatePen.Call( + uintptr(dwPenStyle), + uintptr(dwWidth), + uintptr(unsafe.Pointer(lplb)), + uintptr(dwStyleCount), + uintptr(unsafe.Pointer(lpStyle))) + + return HPEN(ret) +} + +func GetEnhMetaFile(lpszMetaFile *uint16) HENHMETAFILE { + ret, _, _ := procGetEnhMetaFile.Call( + uintptr(unsafe.Pointer(lpszMetaFile))) + + return HENHMETAFILE(ret) +} + +func GetEnhMetaFileHeader(hemf HENHMETAFILE, cbBuffer uint, lpemh *ENHMETAHEADER) uint { + ret, _, _ := procGetEnhMetaFileHeader.Call( + uintptr(hemf), + uintptr(cbBuffer), + uintptr(unsafe.Pointer(lpemh))) + + return uint(ret) +} + +func GetObject(hgdiobj HGDIOBJ, cbBuffer uintptr, lpvObject unsafe.Pointer) int { + ret, _, _ := 
procGetObject.Call( + uintptr(hgdiobj), + uintptr(cbBuffer), + uintptr(lpvObject)) + + return int(ret) +} + +func GetStockObject(fnObject int) HGDIOBJ { + ret, _, _ := procGetDeviceCaps.Call( + uintptr(fnObject)) + + return HGDIOBJ(ret) +} + +func GetTextExtentExPoint(hdc HDC, lpszStr *uint16, cchString, nMaxExtent int, lpnFit, alpDx *int, lpSize *SIZE) bool { + ret, _, _ := procGetTextExtentExPoint.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lpszStr)), + uintptr(cchString), + uintptr(nMaxExtent), + uintptr(unsafe.Pointer(lpnFit)), + uintptr(unsafe.Pointer(alpDx)), + uintptr(unsafe.Pointer(lpSize))) + + return ret != 0 +} + +func GetTextExtentPoint32(hdc HDC, lpString *uint16, c int, lpSize *SIZE) bool { + ret, _, _ := procGetTextExtentPoint32.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lpString)), + uintptr(c), + uintptr(unsafe.Pointer(lpSize))) + + return ret != 0 +} + +func GetTextMetrics(hdc HDC, lptm *TEXTMETRIC) bool { + ret, _, _ := procGetTextMetrics.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lptm))) + + return ret != 0 +} + +func LineTo(hdc HDC, nXEnd, nYEnd int) bool { + ret, _, _ := procLineTo.Call( + uintptr(hdc), + uintptr(nXEnd), + uintptr(nYEnd)) + + return ret != 0 +} + +func MoveToEx(hdc HDC, x, y int, lpPoint *POINT) bool { + ret, _, _ := procMoveToEx.Call( + uintptr(hdc), + uintptr(x), + uintptr(y), + uintptr(unsafe.Pointer(lpPoint))) + + return ret != 0 +} + +func PlayEnhMetaFile(hdc HDC, hemf HENHMETAFILE, lpRect *RECT) bool { + ret, _, _ := procPlayEnhMetaFile.Call( + uintptr(hdc), + uintptr(hemf), + uintptr(unsafe.Pointer(lpRect))) + + return ret != 0 +} + +func Rectangle(hdc HDC, nLeftRect, nTopRect, nRightRect, nBottomRect int) bool { + ret, _, _ := procRectangle.Call( + uintptr(hdc), + uintptr(nLeftRect), + uintptr(nTopRect), + uintptr(nRightRect), + uintptr(nBottomRect)) + + return ret != 0 +} + +func ResetDC(hdc HDC, lpInitData *DEVMODE) HDC { + ret, _, _ := procResetDC.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lpInitData))) + + return HDC(ret) +} + +func SelectObject(hdc HDC, hgdiobj HGDIOBJ) HGDIOBJ { + ret, _, _ := procSelectObject.Call( + uintptr(hdc), + uintptr(hgdiobj)) + + if ret == 0 { + panic("SelectObject failed") + } + + return HGDIOBJ(ret) +} + +func SetBkMode(hdc HDC, iBkMode int) int { + ret, _, _ := procSetBkMode.Call( + uintptr(hdc), + uintptr(iBkMode)) + + if ret == 0 { + panic("SetBkMode failed") + } + + return int(ret) +} + +func SetBrushOrgEx(hdc HDC, nXOrg, nYOrg int, lppt *POINT) bool { + ret, _, _ := procSetBrushOrgEx.Call( + uintptr(hdc), + uintptr(nXOrg), + uintptr(nYOrg), + uintptr(unsafe.Pointer(lppt))) + + return ret != 0 +} + +func SetStretchBltMode(hdc HDC, iStretchMode int) int { + ret, _, _ := procSetStretchBltMode.Call( + uintptr(hdc), + uintptr(iStretchMode)) + + return int(ret) +} + +func SetTextColor(hdc HDC, crColor COLORREF) COLORREF { + ret, _, _ := procSetTextColor.Call( + uintptr(hdc), + uintptr(crColor)) + + if ret == CLR_INVALID { + panic("SetTextColor failed") + } + + return COLORREF(ret) +} + +func SetBkColor(hdc HDC, crColor COLORREF) COLORREF { + ret, _, _ := procSetBkColor.Call( + uintptr(hdc), + uintptr(crColor)) + + if ret == CLR_INVALID { + panic("SetBkColor failed") + } + + return COLORREF(ret) +} + +func StartDoc(hdc HDC, lpdi *DOCINFO) int { + ret, _, _ := procStartDoc.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(lpdi))) + + return int(ret) +} + +func StartPage(hdc HDC) int { + ret, _, _ := procStartPage.Call( + uintptr(hdc)) + + return int(ret) +} + +func StretchBlt(hdcDest HDC, 
nXOriginDest, nYOriginDest, nWidthDest, nHeightDest int, hdcSrc HDC, nXOriginSrc, nYOriginSrc, nWidthSrc, nHeightSrc int, dwRop uint) { + ret, _, _ := procStretchBlt.Call( + uintptr(hdcDest), + uintptr(nXOriginDest), + uintptr(nYOriginDest), + uintptr(nWidthDest), + uintptr(nHeightDest), + uintptr(hdcSrc), + uintptr(nXOriginSrc), + uintptr(nYOriginSrc), + uintptr(nWidthSrc), + uintptr(nHeightSrc), + uintptr(dwRop)) + + if ret == 0 { + panic("StretchBlt failed") + } +} + +func SetDIBitsToDevice(hdc HDC, xDest, yDest, dwWidth, dwHeight, xSrc, ySrc int, uStartScan, cScanLines uint, lpvBits []byte, lpbmi *BITMAPINFO, fuColorUse uint) int { + ret, _, _ := procSetDIBitsToDevice.Call( + uintptr(hdc), + uintptr(xDest), + uintptr(yDest), + uintptr(dwWidth), + uintptr(dwHeight), + uintptr(xSrc), + uintptr(ySrc), + uintptr(uStartScan), + uintptr(cScanLines), + uintptr(unsafe.Pointer(&lpvBits[0])), + uintptr(unsafe.Pointer(lpbmi)), + uintptr(fuColorUse)) + + return int(ret) +} + +func ChoosePixelFormat(hdc HDC, pfd *PIXELFORMATDESCRIPTOR) int { + ret, _, _ := procChoosePixelFormat.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(pfd)), + ) + return int(ret) +} + +func DescribePixelFormat(hdc HDC, iPixelFormat int, nBytes uint, pfd *PIXELFORMATDESCRIPTOR) int { + ret, _, _ := procDescribePixelFormat.Call( + uintptr(hdc), + uintptr(iPixelFormat), + uintptr(nBytes), + uintptr(unsafe.Pointer(pfd)), + ) + return int(ret) +} + +func GetEnhMetaFilePixelFormat(hemf HENHMETAFILE, cbBuffer uint32, pfd *PIXELFORMATDESCRIPTOR) uint { + ret, _, _ := procGetEnhMetaFilePixelFormat.Call( + uintptr(hemf), + uintptr(cbBuffer), + uintptr(unsafe.Pointer(pfd)), + ) + return uint(ret) +} + +func GetPixelFormat(hdc HDC) int { + ret, _, _ := procGetPixelFormat.Call( + uintptr(hdc), + ) + return int(ret) +} + +func SetPixelFormat(hdc HDC, iPixelFormat int, pfd *PIXELFORMATDESCRIPTOR) bool { + ret, _, _ := procSetPixelFormat.Call( + uintptr(hdc), + uintptr(iPixelFormat), + uintptr(unsafe.Pointer(pfd)), + ) + return ret == TRUE +} + +func SwapBuffers(hdc HDC) bool { + ret, _, _ := procSwapBuffers.Call(uintptr(hdc)) + return ret == TRUE +} diff --git a/vendor/github.com/shirou/w32/gdiplus.go b/vendor/github.com/shirou/w32/gdiplus.go new file mode 100644 index 0000000..443334b --- /dev/null +++ b/vendor/github.com/shirou/w32/gdiplus.go @@ -0,0 +1,177 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package w32 + +import ( + "errors" + "fmt" + "syscall" + "unsafe" +) + +const ( + Ok = 0 + GenericError = 1 + InvalidParameter = 2 + OutOfMemory = 3 + ObjectBusy = 4 + InsufficientBuffer = 5 + NotImplemented = 6 + Win32Error = 7 + WrongState = 8 + Aborted = 9 + FileNotFound = 10 + ValueOverflow = 11 + AccessDenied = 12 + UnknownImageFormat = 13 + FontFamilyNotFound = 14 + FontStyleNotFound = 15 + NotTrueTypeFont = 16 + UnsupportedGdiplusVersion = 17 + GdiplusNotInitialized = 18 + PropertyNotFound = 19 + PropertyNotSupported = 20 + ProfileNotFound = 21 +) + +func GetGpStatus(s int32) string { + switch s { + case Ok: + return "Ok" + case GenericError: + return "GenericError" + case InvalidParameter: + return "InvalidParameter" + case OutOfMemory: + return "OutOfMemory" + case ObjectBusy: + return "ObjectBusy" + case InsufficientBuffer: + return "InsufficientBuffer" + case NotImplemented: + return "NotImplemented" + case Win32Error: + return "Win32Error" + case WrongState: + return "WrongState" + case Aborted: + return "Aborted" + case FileNotFound: + return "FileNotFound" + case ValueOverflow: + return "ValueOverflow" + case AccessDenied: + return "AccessDenied" + case UnknownImageFormat: + return "UnknownImageFormat" + case FontFamilyNotFound: + return "FontFamilyNotFound" + case FontStyleNotFound: + return "FontStyleNotFound" + case NotTrueTypeFont: + return "NotTrueTypeFont" + case UnsupportedGdiplusVersion: + return "UnsupportedGdiplusVersion" + case GdiplusNotInitialized: + return "GdiplusNotInitialized" + case PropertyNotFound: + return "PropertyNotFound" + case PropertyNotSupported: + return "PropertyNotSupported" + case ProfileNotFound: + return "ProfileNotFound" + } + return "Unknown Status Value" +} + +var ( + token uintptr + + modgdiplus = syscall.NewLazyDLL("gdiplus.dll") + + procGdipCreateBitmapFromFile = modgdiplus.NewProc("GdipCreateBitmapFromFile") + procGdipCreateBitmapFromHBITMAP = modgdiplus.NewProc("GdipCreateBitmapFromHBITMAP") + procGdipCreateHBITMAPFromBitmap = modgdiplus.NewProc("GdipCreateHBITMAPFromBitmap") + procGdipCreateBitmapFromResource = modgdiplus.NewProc("GdipCreateBitmapFromResource") + procGdipCreateBitmapFromStream = modgdiplus.NewProc("GdipCreateBitmapFromStream") + procGdipDisposeImage = modgdiplus.NewProc("GdipDisposeImage") + procGdiplusShutdown = modgdiplus.NewProc("GdiplusShutdown") + procGdiplusStartup = modgdiplus.NewProc("GdiplusStartup") +) + +func GdipCreateBitmapFromFile(filename string) (*uintptr, error) { + var bitmap *uintptr + ret, _, _ := procGdipCreateBitmapFromFile.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(filename))), + uintptr(unsafe.Pointer(&bitmap))) + + if ret != Ok { + return nil, errors.New(fmt.Sprintf("GdipCreateBitmapFromFile failed with status '%s' for file '%s'", GetGpStatus(int32(ret)), filename)) + } + + return bitmap, nil +} + +func GdipCreateBitmapFromResource(instance HINSTANCE, resId *uint16) (*uintptr, error) { + var bitmap *uintptr + ret, _, _ := procGdipCreateBitmapFromResource.Call( + uintptr(instance), + uintptr(unsafe.Pointer(resId)), + uintptr(unsafe.Pointer(&bitmap))) + + if ret != Ok { + return nil, errors.New(fmt.Sprintf("GdiCreateBitmapFromResource failed with status '%s'", GetGpStatus(int32(ret)))) + } + + return bitmap, nil +} + +func GdipCreateBitmapFromStream(stream *IStream) (*uintptr, error) { + var bitmap *uintptr + ret, _, _ := procGdipCreateBitmapFromStream.Call( + uintptr(unsafe.Pointer(stream)), + uintptr(unsafe.Pointer(&bitmap))) + + if ret != Ok { + return 
nil, errors.New(fmt.Sprintf("GdipCreateBitmapFromStream failed with status '%s'", GetGpStatus(int32(ret)))) + } + + return bitmap, nil +} + +func GdipCreateHBITMAPFromBitmap(bitmap *uintptr, background uint32) (HBITMAP, error) { + var hbitmap HBITMAP + ret, _, _ := procGdipCreateHBITMAPFromBitmap.Call( + uintptr(unsafe.Pointer(bitmap)), + uintptr(unsafe.Pointer(&hbitmap)), + uintptr(background)) + + if ret != Ok { + return 0, errors.New(fmt.Sprintf("GdipCreateHBITMAPFromBitmap failed with status '%s'", GetGpStatus(int32(ret)))) + } + + return hbitmap, nil +} + +func GdipDisposeImage(image *uintptr) { + procGdipDisposeImage.Call(uintptr(unsafe.Pointer(image))) +} + +func GdiplusShutdown() { + procGdiplusShutdown.Call(token) +} + +func GdiplusStartup(input *GdiplusStartupInput, output *GdiplusStartupOutput) { + ret, _, _ := procGdiplusStartup.Call( + uintptr(unsafe.Pointer(&token)), + uintptr(unsafe.Pointer(input)), + uintptr(unsafe.Pointer(output))) + + if ret != Ok { + panic("GdiplusStartup failed with status " + GetGpStatus(int32(ret))) + } +} diff --git a/vendor/github.com/shirou/w32/idispatch.go b/vendor/github.com/shirou/w32/idispatch.go new file mode 100644 index 0000000..d6c2504 --- /dev/null +++ b/vendor/github.com/shirou/w32/idispatch.go @@ -0,0 +1,45 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "unsafe" +) + +type pIDispatchVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr + pGetTypeInfoCount uintptr + pGetTypeInfo uintptr + pGetIDsOfNames uintptr + pInvoke uintptr +} + +type IDispatch struct { + lpVtbl *pIDispatchVtbl +} + +func (this *IDispatch) QueryInterface(id *GUID) *IDispatch { + return ComQueryInterface((*IUnknown)(unsafe.Pointer(this)), id) +} + +func (this *IDispatch) AddRef() int32 { + return ComAddRef((*IUnknown)(unsafe.Pointer(this))) +} + +func (this *IDispatch) Release() int32 { + return ComRelease((*IUnknown)(unsafe.Pointer(this))) +} + +func (this *IDispatch) GetIDsOfName(names []string) []int32 { + return ComGetIDsOfName(this, names) +} + +func (this *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) *VARIANT { + return ComInvoke(this, dispid, dispatch, params...) +} diff --git a/vendor/github.com/shirou/w32/istream.go b/vendor/github.com/shirou/w32/istream.go new file mode 100644 index 0000000..0bb2822 --- /dev/null +++ b/vendor/github.com/shirou/w32/istream.go @@ -0,0 +1,33 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "unsafe" +) + +type pIStreamVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr +} + +type IStream struct { + lpVtbl *pIStreamVtbl +} + +func (this *IStream) QueryInterface(id *GUID) *IDispatch { + return ComQueryInterface((*IUnknown)(unsafe.Pointer(this)), id) +} + +func (this *IStream) AddRef() int32 { + return ComAddRef((*IUnknown)(unsafe.Pointer(this))) +} + +func (this *IStream) Release() int32 { + return ComRelease((*IUnknown)(unsafe.Pointer(this))) +} diff --git a/vendor/github.com/shirou/w32/iunknown.go b/vendor/github.com/shirou/w32/iunknown.go new file mode 100644 index 0000000..847fba7 --- /dev/null +++ b/vendor/github.com/shirou/w32/iunknown.go @@ -0,0 +1,29 @@ +// Copyright 2010-2012 The W32 Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +type pIUnknownVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr +} + +type IUnknown struct { + lpVtbl *pIUnknownVtbl +} + +func (this *IUnknown) QueryInterface(id *GUID) *IDispatch { + return ComQueryInterface(this, id) +} + +func (this *IUnknown) AddRef() int32 { + return ComAddRef(this) +} + +func (this *IUnknown) Release() int32 { + return ComRelease(this) +} diff --git a/vendor/github.com/shirou/w32/kernel32.go b/vendor/github.com/shirou/w32/kernel32.go new file mode 100644 index 0000000..5d5b4d8 --- /dev/null +++ b/vendor/github.com/shirou/w32/kernel32.go @@ -0,0 +1,316 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetModuleHandle = modkernel32.NewProc("GetModuleHandleW") + procMulDiv = modkernel32.NewProc("MulDiv") + procGetConsoleWindow = modkernel32.NewProc("GetConsoleWindow") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID") + procLstrlen = modkernel32.NewProc("lstrlenW") + procLstrcpy = modkernel32.NewProc("lstrcpyW") + procGlobalAlloc = modkernel32.NewProc("GlobalAlloc") + procGlobalFree = modkernel32.NewProc("GlobalFree") + procGlobalLock = modkernel32.NewProc("GlobalLock") + procGlobalUnlock = modkernel32.NewProc("GlobalUnlock") + procMoveMemory = modkernel32.NewProc("RtlMoveMemory") + procFindResource = modkernel32.NewProc("FindResourceW") + procSizeofResource = modkernel32.NewProc("SizeofResource") + procLockResource = modkernel32.NewProc("LockResource") + procLoadResource = modkernel32.NewProc("LoadResource") + procGetLastError = modkernel32.NewProc("GetLastError") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procModule32First = modkernel32.NewProc("Module32FirstW") + procModule32Next = modkernel32.NewProc("Module32NextW") + procProcess32First = modkernel32.NewProc("Process32FirstW") + procProcess32Next = modkernel32.NewProc("Process32NextW") + procGetSystemTimes = modkernel32.NewProc("GetSystemTimes") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = modkernel32.NewProc("SetConsoleTextAttribute") + procGetDiskFreeSpaceEx = modkernel32.NewProc("GetDiskFreeSpaceExW") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") +) + +func GetModuleHandle(modulename string) HINSTANCE { + var mn uintptr + if modulename == "" { + mn = 0 + } else { + mn = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(modulename))) + } + ret, _, _ := procGetModuleHandle.Call(mn) + return HINSTANCE(ret) +} + +func MulDiv(number, numerator, denominator int) int { + ret, _, _ := procMulDiv.Call( + uintptr(number), + uintptr(numerator), + uintptr(denominator)) + + return int(ret) +} + +func GetConsoleWindow() HWND { + ret, _, _ := procGetConsoleWindow.Call() + + return HWND(ret) +} + +func GetCurrentThread() HANDLE { + ret, 
_, _ := procGetCurrentThread.Call() + + return HANDLE(ret) +} + +func GetLogicalDrives() uint32 { + ret, _, _ := procGetLogicalDrives.Call() + + return uint32(ret) +} + +func GetUserDefaultLCID() uint32 { + ret, _, _ := procGetUserDefaultLCID.Call() + + return uint32(ret) +} + +func Lstrlen(lpString *uint16) int { + ret, _, _ := procLstrlen.Call(uintptr(unsafe.Pointer(lpString))) + + return int(ret) +} + +func Lstrcpy(buf []uint16, lpString *uint16) { + procLstrcpy.Call( + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(lpString))) +} + +func GlobalAlloc(uFlags uint, dwBytes uint32) HGLOBAL { + ret, _, _ := procGlobalAlloc.Call( + uintptr(uFlags), + uintptr(dwBytes)) + + if ret == 0 { + panic("GlobalAlloc failed") + } + + return HGLOBAL(ret) +} + +func GlobalFree(hMem HGLOBAL) { + ret, _, _ := procGlobalFree.Call(uintptr(hMem)) + + if ret != 0 { + panic("GlobalFree failed") + } +} + +func GlobalLock(hMem HGLOBAL) unsafe.Pointer { + ret, _, _ := procGlobalLock.Call(uintptr(hMem)) + + if ret == 0 { + panic("GlobalLock failed") + } + + return unsafe.Pointer(ret) +} + +func GlobalUnlock(hMem HGLOBAL) bool { + ret, _, _ := procGlobalUnlock.Call(uintptr(hMem)) + + return ret != 0 +} + +func MoveMemory(destination, source unsafe.Pointer, length uint32) { + procMoveMemory.Call( + uintptr(unsafe.Pointer(destination)), + uintptr(source), + uintptr(length)) +} + +func FindResource(hModule HMODULE, lpName, lpType *uint16) (HRSRC, error) { + ret, _, _ := procFindResource.Call( + uintptr(hModule), + uintptr(unsafe.Pointer(lpName)), + uintptr(unsafe.Pointer(lpType))) + + if ret == 0 { + return 0, syscall.GetLastError() + } + + return HRSRC(ret), nil +} + +func SizeofResource(hModule HMODULE, hResInfo HRSRC) uint32 { + ret, _, _ := procSizeofResource.Call( + uintptr(hModule), + uintptr(hResInfo)) + + if ret == 0 { + panic("SizeofResource failed") + } + + return uint32(ret) +} + +func LockResource(hResData HGLOBAL) unsafe.Pointer { + ret, _, _ := procLockResource.Call(uintptr(hResData)) + + if ret == 0 { + panic("LockResource failed") + } + + return unsafe.Pointer(ret) +} + +func LoadResource(hModule HMODULE, hResInfo HRSRC) HGLOBAL { + ret, _, _ := procLoadResource.Call( + uintptr(hModule), + uintptr(hResInfo)) + + if ret == 0 { + panic("LoadResource failed") + } + + return HGLOBAL(ret) +} + +func GetLastError() uint32 { + ret, _, _ := procGetLastError.Call() + return uint32(ret) +} + +func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) HANDLE { + inherit := 0 + if inheritHandle { + inherit = 1 + } + + ret, _, _ := procOpenProcess.Call( + uintptr(desiredAccess), + uintptr(inherit), + uintptr(processId)) + return HANDLE(ret) +} + +func TerminateProcess(hProcess HANDLE, uExitCode uint) bool { + ret, _, _ := procTerminateProcess.Call( + uintptr(hProcess), + uintptr(uExitCode)) + return ret != 0 +} + +func CloseHandle(object HANDLE) bool { + ret, _, _ := procCloseHandle.Call( + uintptr(object)) + return ret != 0 +} + +func CreateToolhelp32Snapshot(flags, processId uint32) HANDLE { + ret, _, _ := procCreateToolhelp32Snapshot.Call( + uintptr(flags), + uintptr(processId)) + + if ret <= 0 { + return HANDLE(0) + } + + return HANDLE(ret) +} + +func Module32First(snapshot HANDLE, me *MODULEENTRY32) bool { + ret, _, _ := procModule32First.Call( + uintptr(snapshot), + uintptr(unsafe.Pointer(me))) + + return ret != 0 +} + +func Module32Next(snapshot HANDLE, me *MODULEENTRY32) bool { + ret, _, _ := procModule32Next.Call( + uintptr(snapshot), + uintptr(unsafe.Pointer(me))) + + return 
ret != 0 +} +func Process32First(snapshot HANDLE, pe *PROCESSENTRY32) bool { + ret, _, _ := procProcess32First.Call( + uintptr(snapshot), + uintptr(unsafe.Pointer(pe))) + + return ret != 0 +} + +func Process32Next(snapshot HANDLE, pe *PROCESSENTRY32) bool { + ret, _, _ := procProcess32Next.Call( + uintptr(snapshot), + uintptr(unsafe.Pointer(pe))) + + return ret != 0 +} +func GetSystemTimes(lpIdleTime, lpKernelTime, lpUserTime *FILETIME) bool { + ret, _, _ := procGetSystemTimes.Call( + uintptr(unsafe.Pointer(lpIdleTime)), + uintptr(unsafe.Pointer(lpKernelTime)), + uintptr(unsafe.Pointer(lpUserTime))) + + return ret != 0 +} + +func GetProcessTimes(hProcess HANDLE, lpCreationTime, lpExitTime, lpKernelTime, lpUserTime *FILETIME) bool { + ret, _, _ := procGetProcessTimes.Call( + uintptr(hProcess), + uintptr(unsafe.Pointer(lpCreationTime)), + uintptr(unsafe.Pointer(lpExitTime)), + uintptr(unsafe.Pointer(lpKernelTime)), + uintptr(unsafe.Pointer(lpUserTime))) + + return ret != 0 +} + +func GetConsoleScreenBufferInfo(hConsoleOutput HANDLE) *CONSOLE_SCREEN_BUFFER_INFO { + var csbi CONSOLE_SCREEN_BUFFER_INFO + ret, _, _ := procGetConsoleScreenBufferInfo.Call( + uintptr(hConsoleOutput), + uintptr(unsafe.Pointer(&csbi))) + if ret == 0 { + return nil + } + return &csbi +} + +func SetConsoleTextAttribute(hConsoleOutput HANDLE, wAttributes uint16) bool { + ret, _, _ := procSetConsoleTextAttribute.Call( + uintptr(hConsoleOutput), + uintptr(wAttributes)) + return ret != 0 +} + +func GetDiskFreeSpaceEx(dirName string) (r bool, + freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes uint64) { + ret, _, _ := procGetDiskFreeSpaceEx.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(dirName))), + uintptr(unsafe.Pointer(&freeBytesAvailable)), + uintptr(unsafe.Pointer(&totalNumberOfBytes)), + uintptr(unsafe.Pointer(&totalNumberOfFreeBytes))) + return ret != 0, + freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes +} diff --git a/vendor/github.com/shirou/w32/ole32.go b/vendor/github.com/shirou/w32/ole32.go new file mode 100644 index 0000000..4858984 --- /dev/null +++ b/vendor/github.com/shirou/w32/ole32.go @@ -0,0 +1,65 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modole32 = syscall.NewLazyDLL("ole32.dll") + + procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoInitialize = modole32.NewProc("CoInitialize") + procCoUninitialize = modole32.NewProc("CoUninitialize") + procCreateStreamOnHGlobal = modole32.NewProc("CreateStreamOnHGlobal") +) + +func CoInitializeEx(coInit uintptr) HRESULT { + ret, _, _ := procCoInitializeEx.Call( + 0, + coInit) + + switch uint32(ret) { + case E_INVALIDARG: + panic("CoInitializeEx failed with E_INVALIDARG") + case E_OUTOFMEMORY: + panic("CoInitializeEx failed with E_OUTOFMEMORY") + case E_UNEXPECTED: + panic("CoInitializeEx failed with E_UNEXPECTED") + } + + return HRESULT(ret) +} + +func CoInitialize() { + procCoInitialize.Call(0) +} + +func CoUninitialize() { + procCoUninitialize.Call() +} + +func CreateStreamOnHGlobal(hGlobal HGLOBAL, fDeleteOnRelease bool) *IStream { + stream := new(IStream) + ret, _, _ := procCreateStreamOnHGlobal.Call( + uintptr(hGlobal), + uintptr(BoolToBOOL(fDeleteOnRelease)), + uintptr(unsafe.Pointer(&stream))) + + switch uint32(ret) { + case E_INVALIDARG: + panic("CreateStreamOnHGlobal failed with E_INVALIDARG") + case E_OUTOFMEMORY: + panic("CreateStreamOnHGlobal failed with E_OUTOFMEMORY") + case E_UNEXPECTED: + panic("CreateStreamOnHGlobal failed with E_UNEXPECTED") + } + + return stream +} diff --git a/vendor/github.com/shirou/w32/oleaut32.go b/vendor/github.com/shirou/w32/oleaut32.go new file mode 100644 index 0000000..cdfcb00 --- /dev/null +++ b/vendor/github.com/shirou/w32/oleaut32.go @@ -0,0 +1,50 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modoleaut32 = syscall.NewLazyDLL("oleaut32") + + procVariantInit = modoleaut32.NewProc("VariantInit") + procSysAllocString = modoleaut32.NewProc("SysAllocString") + procSysFreeString = modoleaut32.NewProc("SysFreeString") + procSysStringLen = modoleaut32.NewProc("SysStringLen") + procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo") + procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch") +) + +func VariantInit(v *VARIANT) { + hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + panic("Invoke VariantInit error.") + } + return +} + +func SysAllocString(v string) (ss *int16) { + pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v)))) + ss = (*int16)(unsafe.Pointer(pss)) + return +} + +func SysFreeString(v *int16) { + hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + panic("Invoke SysFreeString error.") + } + return +} + +func SysStringLen(v *int16) uint { + l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v))) + return uint(l) +} diff --git a/vendor/github.com/shirou/w32/opengl32.go b/vendor/github.com/shirou/w32/opengl32.go new file mode 100644 index 0000000..4f35f19 --- /dev/null +++ b/vendor/github.com/shirou/w32/opengl32.go @@ -0,0 +1,74 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modopengl32 = syscall.NewLazyDLL("opengl32.dll") + + procwglCreateContext = modopengl32.NewProc("wglCreateContext") + procwglCreateLayerContext = modopengl32.NewProc("wglCreateLayerContext") + procwglDeleteContext = modopengl32.NewProc("wglDeleteContext") + procwglGetProcAddress = modopengl32.NewProc("wglGetProcAddress") + procwglMakeCurrent = modopengl32.NewProc("wglMakeCurrent") + procwglShareLists = modopengl32.NewProc("wglShareLists") +) + +func WglCreateContext(hdc HDC) HGLRC { + ret, _, _ := procwglCreateContext.Call( + uintptr(hdc), + ) + + return HGLRC(ret) +} + +func WglCreateLayerContext(hdc HDC, iLayerPlane int) HGLRC { + ret, _, _ := procwglCreateLayerContext.Call( + uintptr(hdc), + uintptr(iLayerPlane), + ) + + return HGLRC(ret) +} + +func WglDeleteContext(hglrc HGLRC) bool { + ret, _, _ := procwglDeleteContext.Call( + uintptr(hglrc), + ) + + return ret == TRUE +} + +func WglGetProcAddress(szProc string) uintptr { + ret, _, _ := procwglGetProcAddress.Call( + uintptr(unsafe.Pointer(syscall.StringBytePtr(szProc))), + ) + + return ret +} + +func WglMakeCurrent(hdc HDC, hglrc HGLRC) bool { + ret, _, _ := procwglMakeCurrent.Call( + uintptr(hdc), + uintptr(hglrc), + ) + + return ret == TRUE +} + +func WglShareLists(hglrc1, hglrc2 HGLRC) bool { + ret, _, _ := procwglShareLists.Call( + uintptr(hglrc1), + uintptr(hglrc2), + ) + + return ret == TRUE +} diff --git a/vendor/github.com/shirou/w32/psapi.go b/vendor/github.com/shirou/w32/psapi.go new file mode 100644 index 0000000..ab7858c --- /dev/null +++ b/vendor/github.com/shirou/w32/psapi.go @@ -0,0 +1,27 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package w32 + +import ( + "syscall" + "unsafe" +) + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + + procEnumProcesses = modpsapi.NewProc("EnumProcesses") +) + +func EnumProcesses(processIds []uint32, cb uint32, bytesReturned *uint32) bool { + ret, _, _ := procEnumProcesses.Call( + uintptr(unsafe.Pointer(&processIds[0])), + uintptr(cb), + uintptr(unsafe.Pointer(bytesReturned))) + + return ret != 0 +} diff --git a/vendor/github.com/shirou/w32/shell32.go b/vendor/github.com/shirou/w32/shell32.go new file mode 100644 index 0000000..0f5ce8c --- /dev/null +++ b/vendor/github.com/shirou/w32/shell32.go @@ -0,0 +1,155 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package w32 + +import ( + "errors" + "fmt" + "syscall" + "unsafe" +) + +var ( + modshell32 = syscall.NewLazyDLL("shell32.dll") + + procSHBrowseForFolder = modshell32.NewProc("SHBrowseForFolderW") + procSHGetPathFromIDList = modshell32.NewProc("SHGetPathFromIDListW") + procDragAcceptFiles = modshell32.NewProc("DragAcceptFiles") + procDragQueryFile = modshell32.NewProc("DragQueryFileW") + procDragQueryPoint = modshell32.NewProc("DragQueryPoint") + procDragFinish = modshell32.NewProc("DragFinish") + procShellExecute = modshell32.NewProc("ShellExecuteW") + procExtractIcon = modshell32.NewProc("ExtractIconW") +) + +func SHBrowseForFolder(bi *BROWSEINFO) uintptr { + ret, _, _ := procSHBrowseForFolder.Call(uintptr(unsafe.Pointer(bi))) + + return ret +} + +func SHGetPathFromIDList(idl uintptr) string { + buf := make([]uint16, 1024) + procSHGetPathFromIDList.Call( + idl, + uintptr(unsafe.Pointer(&buf[0]))) + + return syscall.UTF16ToString(buf) +} + +func DragAcceptFiles(hwnd HWND, accept bool) { + procDragAcceptFiles.Call( + uintptr(hwnd), + uintptr(BoolToBOOL(accept))) +} + +func DragQueryFile(hDrop HDROP, iFile uint) (fileName string, fileCount uint) { + ret, _, _ := procDragQueryFile.Call( + uintptr(hDrop), + uintptr(iFile), + 0, + 0) + + fileCount = uint(ret) + + if iFile != 0xFFFFFFFF { + buf := make([]uint16, fileCount+1) + + ret, _, _ := procDragQueryFile.Call( + uintptr(hDrop), + uintptr(iFile), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(fileCount+1)) + + if ret == 0 { + panic("Invoke DragQueryFile error.") + } + + fileName = syscall.UTF16ToString(buf) + } + + return +} + +func DragQueryPoint(hDrop HDROP) (x, y int, isClientArea bool) { + var pt POINT + ret, _, _ := procDragQueryPoint.Call( + uintptr(hDrop), + uintptr(unsafe.Pointer(&pt))) + + return int(pt.X), int(pt.Y), (ret == 1) +} + +func DragFinish(hDrop HDROP) { + procDragFinish.Call(uintptr(hDrop)) +} + +func ShellExecute(hwnd HWND, lpOperation, lpFile, lpParameters, lpDirectory string, nShowCmd int) error { + var op, param, directory uintptr + if len(lpOperation) != 0 { + op = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpOperation))) + } + if len(lpParameters) != 0 { + param = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpParameters))) + } + if len(lpDirectory) != 0 { + directory = uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpDirectory))) + } + + ret, _, _ := procShellExecute.Call( + uintptr(hwnd), + op, + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpFile))), + param, + directory, + uintptr(nShowCmd)) + + errorMsg := "" + if ret != 0 && ret <= 32 { + switch int(ret) { + case ERROR_FILE_NOT_FOUND: + errorMsg = "The specified file was not found." + case ERROR_PATH_NOT_FOUND: + errorMsg = "The specified path was not found." + case ERROR_BAD_FORMAT: + errorMsg = "The .exe file is invalid (non-Win32 .exe or error in .exe image)." + case SE_ERR_ACCESSDENIED: + errorMsg = "The operating system denied access to the specified file." + case SE_ERR_ASSOCINCOMPLETE: + errorMsg = "The file name association is incomplete or invalid." + case SE_ERR_DDEBUSY: + errorMsg = "The DDE transaction could not be completed because other DDE transactions were being processed." + case SE_ERR_DDEFAIL: + errorMsg = "The DDE transaction failed." + case SE_ERR_DDETIMEOUT: + errorMsg = "The DDE transaction could not be completed because the request timed out." + case SE_ERR_DLLNOTFOUND: + errorMsg = "The specified DLL was not found." 
+ case SE_ERR_NOASSOC: + errorMsg = "There is no application associated with the given file name extension. This error will also be returned if you attempt to print a file that is not printable." + case SE_ERR_OOM: + errorMsg = "There was not enough memory to complete the operation." + case SE_ERR_SHARE: + errorMsg = "A sharing violation occurred." + default: + errorMsg = fmt.Sprintf("Unknown error occurred with error code %v", ret) + } + } else { + return nil + } + + return errors.New(errorMsg) +} + +func ExtractIcon(lpszExeFileName string, nIconIndex int) HICON { + ret, _, _ := procExtractIcon.Call( + 0, + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(lpszExeFileName))), + uintptr(nIconIndex)) + + return HICON(ret) +} diff --git a/vendor/github.com/shirou/w32/typedef.go b/vendor/github.com/shirou/w32/typedef.go new file mode 100644 index 0000000..65f5111 --- /dev/null +++ b/vendor/github.com/shirou/w32/typedef.go @@ -0,0 +1,901 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package w32 + +import ( + "unsafe" +) + +// From MSDN: Windows Data Types +// http://msdn.microsoft.com/en-us/library/s3f49ktz.aspx +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa383751.aspx +// ATOM WORD +// BOOL int32 +// BOOLEAN byte +// BYTE byte +// CCHAR int8 +// CHAR int8 +// COLORREF DWORD +// DWORD uint32 +// DWORDLONG ULONGLONG +// DWORD_PTR ULONG_PTR +// DWORD32 uint32 +// DWORD64 uint64 +// FLOAT float32 +// HACCEL HANDLE +// HALF_PTR struct{} // ??? +// HANDLE PVOID +// HBITMAP HANDLE +// HBRUSH HANDLE +// HCOLORSPACE HANDLE +// HCONV HANDLE +// HCONVLIST HANDLE +// HCURSOR HANDLE +// HDC HANDLE +// HDDEDATA HANDLE +// HDESK HANDLE +// HDROP HANDLE +// HDWP HANDLE +// HENHMETAFILE HANDLE +// HFILE HANDLE +// HFONT HANDLE +// HGDIOBJ HANDLE +// HGLOBAL HANDLE +// HHOOK HANDLE +// HICON HANDLE +// HINSTANCE HANDLE +// HKEY HANDLE +// HKL HANDLE +// HLOCAL HANDLE +// HMENU HANDLE +// HMETAFILE HANDLE +// HMODULE HANDLE +// HPALETTE HANDLE +// HPEN HANDLE +// HRESULT int32 +// HRGN HANDLE +// HSZ HANDLE +// HWINSTA HANDLE +// HWND HANDLE +// INT int32 +// INT_PTR uintptr +// INT8 int8 +// INT16 int16 +// INT32 int32 +// INT64 int64 +// LANGID WORD +// LCID DWORD +// LCTYPE DWORD +// LGRPID DWORD +// LONG int32 +// LONGLONG int64 +// LONG_PTR uintptr +// LONG32 int32 +// LONG64 int64 +// LPARAM LONG_PTR +// LPBOOL *BOOL +// LPBYTE *BYTE +// LPCOLORREF *COLORREF +// LPCSTR *int8 +// LPCTSTR LPCWSTR +// LPCVOID unsafe.Pointer +// LPCWSTR *WCHAR +// LPDWORD *DWORD +// LPHANDLE *HANDLE +// LPINT *INT +// LPLONG *LONG +// LPSTR *CHAR +// LPTSTR LPWSTR +// LPVOID unsafe.Pointer +// LPWORD *WORD +// LPWSTR *WCHAR +// LRESULT LONG_PTR +// PBOOL *BOOL +// PBOOLEAN *BOOLEAN +// PBYTE *BYTE +// PCHAR *CHAR +// PCSTR *CHAR +// PCTSTR PCWSTR +// PCWSTR *WCHAR +// PDWORD *DWORD +// PDWORDLONG *DWORDLONG +// PDWORD_PTR *DWORD_PTR +// PDWORD32 *DWORD32 +// PDWORD64 *DWORD64 +// PFLOAT *FLOAT +// PHALF_PTR *HALF_PTR +// PHANDLE *HANDLE +// PHKEY *HKEY +// PINT_PTR *INT_PTR +// PINT8 *INT8 +// PINT16 *INT16 +// PINT32 *INT32 +// PINT64 *INT64 +// PLCID *LCID +// PLONG *LONG +// PLONGLONG *LONGLONG +// PLONG_PTR *LONG_PTR +// PLONG32 *LONG32 +// PLONG64 *LONG64 +// POINTER_32 struct{} // ??? +// POINTER_64 struct{} // ??? 
+// POINTER_SIGNED uintptr +// POINTER_UNSIGNED uintptr +// PSHORT *SHORT +// PSIZE_T *SIZE_T +// PSSIZE_T *SSIZE_T +// PSTR *CHAR +// PTBYTE *TBYTE +// PTCHAR *TCHAR +// PTSTR PWSTR +// PUCHAR *UCHAR +// PUHALF_PTR *UHALF_PTR +// PUINT *UINT +// PUINT_PTR *UINT_PTR +// PUINT8 *UINT8 +// PUINT16 *UINT16 +// PUINT32 *UINT32 +// PUINT64 *UINT64 +// PULONG *ULONG +// PULONGLONG *ULONGLONG +// PULONG_PTR *ULONG_PTR +// PULONG32 *ULONG32 +// PULONG64 *ULONG64 +// PUSHORT *USHORT +// PVOID unsafe.Pointer +// PWCHAR *WCHAR +// PWORD *WORD +// PWSTR *WCHAR +// QWORD uint64 +// SC_HANDLE HANDLE +// SC_LOCK LPVOID +// SERVICE_STATUS_HANDLE HANDLE +// SHORT int16 +// SIZE_T ULONG_PTR +// SSIZE_T LONG_PTR +// TBYTE WCHAR +// TCHAR WCHAR +// UCHAR uint8 +// UHALF_PTR struct{} // ??? +// UINT uint32 +// UINT_PTR uintptr +// UINT8 uint8 +// UINT16 uint16 +// UINT32 uint32 +// UINT64 uint64 +// ULONG uint32 +// ULONGLONG uint64 +// ULONG_PTR uintptr +// ULONG32 uint32 +// ULONG64 uint64 +// USHORT uint16 +// USN LONGLONG +// WCHAR uint16 +// WORD uint16 +// WPARAM UINT_PTR +type ( + ATOM uint16 + BOOL int32 + COLORREF uint32 + DWM_FRAME_COUNT uint64 + HACCEL HANDLE + HANDLE uintptr + HBITMAP HANDLE + HBRUSH HANDLE + HCURSOR HANDLE + HDC HANDLE + HDROP HANDLE + HDWP HANDLE + HENHMETAFILE HANDLE + HFONT HANDLE + HGDIOBJ HANDLE + HGLOBAL HANDLE + HGLRC HANDLE + HICON HANDLE + HIMAGELIST HANDLE + HINSTANCE HANDLE + HKEY HANDLE + HKL HANDLE + HMENU HANDLE + HMODULE HANDLE + HMONITOR HANDLE + HPEN HANDLE + HRESULT int32 + HRGN HANDLE + HRSRC HANDLE + HTHUMBNAIL HANDLE + HWND HANDLE + LPCVOID unsafe.Pointer + PVOID unsafe.Pointer + QPC_TIME uint64 +) + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162805.aspx +type POINT struct { + X, Y int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162897.aspx +type RECT struct { + Left, Top, Right, Bottom int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms633577.aspx +type WNDCLASSEX struct { + Size uint32 + Style uint32 + WndProc uintptr + ClsExtra int32 + WndExtra int32 + Instance HINSTANCE + Icon HICON + Cursor HCURSOR + Background HBRUSH + MenuName *uint16 + ClassName *uint16 + IconSm HICON +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms644958.aspx +type MSG struct { + Hwnd HWND + Message uint32 + WParam uintptr + LParam uintptr + Time uint32 + Pt POINT +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145037.aspx +type LOGFONT struct { + Height int32 + Width int32 + Escapement int32 + Orientation int32 + Weight int32 + Italic byte + Underline byte + StrikeOut byte + CharSet byte + OutPrecision byte + ClipPrecision byte + Quality byte + PitchAndFamily byte + FaceName [LF_FACESIZE]uint16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646839.aspx +type OPENFILENAME struct { + StructSize uint32 + Owner HWND + Instance HINSTANCE + Filter *uint16 + CustomFilter *uint16 + MaxCustomFilter uint32 + FilterIndex uint32 + File *uint16 + MaxFile uint32 + FileTitle *uint16 + MaxFileTitle uint32 + InitialDir *uint16 + Title *uint16 + Flags uint32 + FileOffset uint16 + FileExtension uint16 + DefExt *uint16 + CustData uintptr + FnHook uintptr + TemplateName *uint16 + PvReserved unsafe.Pointer + DwReserved uint32 + FlagsEx uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb773205.aspx +type BROWSEINFO struct { + Owner HWND + Root *uint16 + DisplayName *uint16 + Title *uint16 + Flags uint32 + CallbackFunc uintptr + LParam uintptr + Image int32 +} + 
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa373931.aspx +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms221627.aspx +type VARIANT struct { + VT uint16 // 2 + WReserved1 uint16 // 4 + WReserved2 uint16 // 6 + WReserved3 uint16 // 8 + Val int64 // 16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms221416.aspx +type DISPPARAMS struct { + Rgvarg uintptr + RgdispidNamedArgs uintptr + CArgs uint32 + CNamedArgs uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms221133.aspx +type EXCEPINFO struct { + WCode uint16 + WReserved uint16 + BstrSource *uint16 + BstrDescription *uint16 + BstrHelpFile *uint16 + DwHelpContext uint32 + PvReserved uintptr + PfnDeferredFillIn uintptr + Scode int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145035.aspx +type LOGBRUSH struct { + LbStyle uint32 + LbColor COLORREF + LbHatch uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183565.aspx +type DEVMODE struct { + DmDeviceName [CCHDEVICENAME]uint16 + DmSpecVersion uint16 + DmDriverVersion uint16 + DmSize uint16 + DmDriverExtra uint16 + DmFields uint32 + DmOrientation int16 + DmPaperSize int16 + DmPaperLength int16 + DmPaperWidth int16 + DmScale int16 + DmCopies int16 + DmDefaultSource int16 + DmPrintQuality int16 + DmColor int16 + DmDuplex int16 + DmYResolution int16 + DmTTOption int16 + DmCollate int16 + DmFormName [CCHFORMNAME]uint16 + DmLogPixels uint16 + DmBitsPerPel uint32 + DmPelsWidth uint32 + DmPelsHeight uint32 + DmDisplayFlags uint32 + DmDisplayFrequency uint32 + DmICMMethod uint32 + DmICMIntent uint32 + DmMediaType uint32 + DmDitherType uint32 + DmReserved1 uint32 + DmReserved2 uint32 + DmPanningWidth uint32 + DmPanningHeight uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183376.aspx +type BITMAPINFOHEADER struct { + BiSize uint32 + BiWidth int32 + BiHeight int32 + BiPlanes uint16 + BiBitCount uint16 + BiCompression uint32 + BiSizeImage uint32 + BiXPelsPerMeter int32 + BiYPelsPerMeter int32 + BiClrUsed uint32 + BiClrImportant uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162938.aspx +type RGBQUAD struct { + RgbBlue byte + RgbGreen byte + RgbRed byte + RgbReserved byte +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183375.aspx +type BITMAPINFO struct { + BmiHeader BITMAPINFOHEADER + BmiColors *RGBQUAD +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183371.aspx +type BITMAP struct { + BmType int32 + BmWidth int32 + BmHeight int32 + BmWidthBytes int32 + BmPlanes uint16 + BmBitsPixel uint16 + BmBits unsafe.Pointer +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183567.aspx +type DIBSECTION struct { + DsBm BITMAP + DsBmih BITMAPINFOHEADER + DsBitfields [3]uint32 + DshSection HANDLE + DsOffset uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162607.aspx +type ENHMETAHEADER struct { + IType uint32 + NSize uint32 + RclBounds RECT + RclFrame RECT + DSignature uint32 + NVersion uint32 + NBytes uint32 + NRecords uint32 + NHandles uint16 + SReserved uint16 + NDescription uint32 + OffDescription uint32 + NPalEntries uint32 + SzlDevice SIZE + SzlMillimeters SIZE + CbPixelFormat uint32 + OffPixelFormat uint32 + BOpenGL uint32 + SzlMicrometers SIZE +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145106.aspx +type SIZE struct { + CX, CY int32 +} + +// 
http://msdn.microsoft.com/en-us/library/windows/desktop/dd145132.aspx +type TEXTMETRIC struct { + TmHeight int32 + TmAscent int32 + TmDescent int32 + TmInternalLeading int32 + TmExternalLeading int32 + TmAveCharWidth int32 + TmMaxCharWidth int32 + TmWeight int32 + TmOverhang int32 + TmDigitizedAspectX int32 + TmDigitizedAspectY int32 + TmFirstChar uint16 + TmLastChar uint16 + TmDefaultChar uint16 + TmBreakChar uint16 + TmItalic byte + TmUnderlined byte + TmStruckOut byte + TmPitchAndFamily byte + TmCharSet byte +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd183574.aspx +type DOCINFO struct { + CbSize int32 + LpszDocName *uint16 + LpszOutput *uint16 + LpszDatatype *uint16 + FwType uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb775514.aspx +type NMHDR struct { + HwndFrom HWND + IdFrom uintptr + Code uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774743.aspx +type LVCOLUMN struct { + Mask uint32 + Fmt int32 + Cx int32 + PszText *uint16 + CchTextMax int32 + ISubItem int32 + IImage int32 + IOrder int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774760.aspx +type LVITEM struct { + Mask uint32 + IItem int32 + ISubItem int32 + State uint32 + StateMask uint32 + PszText *uint16 + CchTextMax int32 + IImage int32 + LParam uintptr + IIndent int32 + IGroupId int32 + CColumns uint32 + PuColumns uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774754.aspx +type LVHITTESTINFO struct { + Pt POINT + Flags uint32 + IItem int32 + ISubItem int32 + IGroup int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774771.aspx +type NMITEMACTIVATE struct { + Hdr NMHDR + IItem int32 + ISubItem int32 + UNewState uint32 + UOldState uint32 + UChanged uint32 + PtAction POINT + LParam uintptr + UKeyFlags uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774773.aspx +type NMLISTVIEW struct { + Hdr NMHDR + IItem int32 + ISubItem int32 + UNewState uint32 + UOldState uint32 + UChanged uint32 + PtAction POINT + LParam uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb774780.aspx +type NMLVDISPINFO struct { + Hdr NMHDR + Item LVITEM +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb775507.aspx +type INITCOMMONCONTROLSEX struct { + DwSize uint32 + DwICC uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb760256.aspx +type TOOLINFO struct { + CbSize uint32 + UFlags uint32 + Hwnd HWND + UId uintptr + Rect RECT + Hinst HINSTANCE + LpszText *uint16 + LParam uintptr + LpReserved unsafe.Pointer +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms645604.aspx +type TRACKMOUSEEVENT struct { + CbSize uint32 + DwFlags uint32 + HwndTrack HWND + DwHoverTime uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms534067.aspx +type GdiplusStartupInput struct { + GdiplusVersion uint32 + DebugEventCallback uintptr + SuppressBackgroundThread BOOL + SuppressExternalCodecs BOOL +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms534068.aspx +type GdiplusStartupOutput struct { + NotificationHook uintptr + NotificationUnhook uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd162768.aspx +type PAINTSTRUCT struct { + Hdc HDC + FErase BOOL + RcPaint RECT + FRestore BOOL + FIncUpdate BOOL + RgbReserved [32]byte +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa363646.aspx +type EVENTLOGRECORD struct { + Length uint32 + Reserved uint32 + 
RecordNumber uint32 + TimeGenerated uint32 + TimeWritten uint32 + EventID uint32 + EventType uint16 + NumStrings uint16 + EventCategory uint16 + ReservedFlags uint16 + ClosingRecordNumber uint32 + StringOffset uint32 + UserSidLength uint32 + UserSidOffset uint32 + DataLength uint32 + DataOffset uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms685996.aspx +type SERVICE_STATUS struct { + DwServiceType uint32 + DwCurrentState uint32 + DwControlsAccepted uint32 + DwWin32ExitCode uint32 + DwServiceSpecificExitCode uint32 + DwCheckPoint uint32 + DwWaitHint uint32 +} +type PROCESSENTRY32 struct { + DwSize uint32 + CntUsage uint32 + Th32ProcessID uint32 + Th32DefaultHeapID uintptr + Th32ModuleID uint32 + CntThreads uint32 + Th32ParentProcessID uint32 + PcPriClassBase int32 + DwFlags uint32 + SzExeFile [MAX_PATH]uint16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms684225.aspx +type MODULEENTRY32 struct { + Size uint32 + ModuleID uint32 + ProcessID uint32 + GlblcntUsage uint32 + ProccntUsage uint32 + ModBaseAddr *uint8 + ModBaseSize uint32 + HModule HMODULE + SzModule [MAX_MODULE_NAME32 + 1]uint16 + SzExePath [MAX_PATH]uint16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms724284.aspx +type FILETIME struct { + DwLowDateTime uint32 + DwHighDateTime uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119.aspx +type COORD struct { + X, Y int16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311.aspx +type SMALL_RECT struct { + Left, Top, Right, Bottom int16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093.aspx +type CONSOLE_SCREEN_BUFFER_INFO struct { + DwSize COORD + DwCursorPosition COORD + WAttributes uint16 + SrWindow SMALL_RECT + DwMaximumWindowSize COORD +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/bb773244.aspx +type MARGINS struct { + CxLeftWidth, CxRightWidth, CyTopHeight, CyBottomHeight int32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969500.aspx +type DWM_BLURBEHIND struct { + DwFlags uint32 + fEnable BOOL + hRgnBlur HRGN + fTransitionOnMaximized BOOL +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969501.aspx +type DWM_PRESENT_PARAMETERS struct { + cbSize uint32 + fQueue BOOL + cRefreshStart DWM_FRAME_COUNT + cBuffer uint32 + fUseSourceRate BOOL + rateSource UNSIGNED_RATIO + cRefreshesPerFrame uint32 + eSampling DWM_SOURCE_FRAME_SAMPLING +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969502.aspx +type DWM_THUMBNAIL_PROPERTIES struct { + dwFlags uint32 + rcDestination RECT + rcSource RECT + opacity byte + fVisible BOOL + fSourceClientAreaOnly BOOL +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969503.aspx +type DWM_TIMING_INFO struct { + cbSize uint32 + rateRefresh UNSIGNED_RATIO + qpcRefreshPeriod QPC_TIME + rateCompose UNSIGNED_RATIO + qpcVBlank QPC_TIME + cRefresh DWM_FRAME_COUNT + cDXRefresh uint32 + qpcCompose QPC_TIME + cFrame DWM_FRAME_COUNT + cDXPresent uint32 + cRefreshFrame DWM_FRAME_COUNT + cFrameSubmitted DWM_FRAME_COUNT + cDXPresentSubmitted uint32 + cFrameConfirmed DWM_FRAME_COUNT + cDXPresentConfirmed uint32 + cRefreshConfirmed DWM_FRAME_COUNT + cDXRefreshConfirmed uint32 + cFramesLate DWM_FRAME_COUNT + cFramesOutstanding uint32 + cFrameDisplayed DWM_FRAME_COUNT + qpcFrameDisplayed QPC_TIME + cRefreshFrameDisplayed DWM_FRAME_COUNT + cFrameComplete DWM_FRAME_COUNT + qpcFrameComplete QPC_TIME + cFramePending DWM_FRAME_COUNT + qpcFramePending 
QPC_TIME + cFramesDisplayed DWM_FRAME_COUNT + cFramesComplete DWM_FRAME_COUNT + cFramesPending DWM_FRAME_COUNT + cFramesAvailable DWM_FRAME_COUNT + cFramesDropped DWM_FRAME_COUNT + cFramesMissed DWM_FRAME_COUNT + cRefreshNextDisplayed DWM_FRAME_COUNT + cRefreshNextPresented DWM_FRAME_COUNT + cRefreshesDisplayed DWM_FRAME_COUNT + cRefreshesPresented DWM_FRAME_COUNT + cRefreshStarted DWM_FRAME_COUNT + cPixelsReceived uint64 + cPixelsDrawn uint64 + cBuffersEmpty DWM_FRAME_COUNT +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd389402.aspx +type MilMatrix3x2D struct { + S_11, S_12, S_21, S_22 float64 + DX, DY float64 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa969505.aspx +type UNSIGNED_RATIO struct { + uiNumerator uint32 + uiDenominator uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms632603.aspx +type CREATESTRUCT struct { + CreateParams uintptr + Instance HINSTANCE + Menu HMENU + Parent HWND + Cy, Cx int32 + Y, X int32 + Style int32 + Name *uint16 + Class *uint16 + dwExStyle uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145065.aspx +type MONITORINFO struct { + CbSize uint32 + RcMonitor RECT + RcWork RECT + DwFlags uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd145066.aspx +type MONITORINFOEX struct { + MONITORINFO + SzDevice [CCHDEVICENAME]uint16 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/dd368826.aspx +type PIXELFORMATDESCRIPTOR struct { + Size uint16 + Version uint16 + DwFlags uint32 + IPixelType byte + ColorBits byte + RedBits, RedShift byte + GreenBits, GreenShift byte + BlueBits, BlueShift byte + AlphaBits, AlphaShift byte + AccumBits byte + AccumRedBits byte + AccumGreenBits byte + AccumBlueBits byte + AccumAlphaBits byte + DepthBits, StencilBits byte + AuxBuffers byte + ILayerType byte + Reserved byte + DwLayerMask uint32 + DwVisibleMask uint32 + DwDamageMask uint32 +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646270(v=vs.85).aspx +type INPUT struct { + Type uint32 + Mi MOUSEINPUT + Ki KEYBDINPUT + Hi HARDWAREINPUT +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646273(v=vs.85).aspx +type MOUSEINPUT struct { + Dx int32 + Dy int32 + MouseData uint32 + DwFlags uint32 + Time uint32 + DwExtraInfo uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646271(v=vs.85).aspx +type KEYBDINPUT struct { + WVk uint16 + WScan uint16 + DwFlags uint32 + Time uint32 + DwExtraInfo uintptr +} + +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms646269(v=vs.85).aspx +type HARDWAREINPUT struct { + UMsg uint32 + WParamL uint16 + WParamH uint16 +} + +type KbdInput struct { + typ uint32 + ki KEYBDINPUT +} + +type MouseInput struct { + typ uint32 + mi MOUSEINPUT +} + +type HardwareInput struct { + typ uint32 + hi HARDWAREINPUT +} diff --git a/vendor/github.com/shirou/w32/user32.go b/vendor/github.com/shirou/w32/user32.go new file mode 100644 index 0000000..6aa7cd7 --- /dev/null +++ b/vendor/github.com/shirou/w32/user32.go @@ -0,0 +1,950 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package w32 + +import ( + "fmt" + "syscall" + "unsafe" +) + +var ( + moduser32 = syscall.NewLazyDLL("user32.dll") + + procRegisterClassEx = moduser32.NewProc("RegisterClassExW") + procLoadIcon = moduser32.NewProc("LoadIconW") + procLoadCursor = moduser32.NewProc("LoadCursorW") + procShowWindow = moduser32.NewProc("ShowWindow") + procUpdateWindow = moduser32.NewProc("UpdateWindow") + procCreateWindowEx = moduser32.NewProc("CreateWindowExW") + procAdjustWindowRect = moduser32.NewProc("AdjustWindowRect") + procAdjustWindowRectEx = moduser32.NewProc("AdjustWindowRectEx") + procDestroyWindow = moduser32.NewProc("DestroyWindow") + procDefWindowProc = moduser32.NewProc("DefWindowProcW") + procDefDlgProc = moduser32.NewProc("DefDlgProcW") + procPostQuitMessage = moduser32.NewProc("PostQuitMessage") + procGetMessage = moduser32.NewProc("GetMessageW") + procTranslateMessage = moduser32.NewProc("TranslateMessage") + procDispatchMessage = moduser32.NewProc("DispatchMessageW") + procSendMessage = moduser32.NewProc("SendMessageW") + procPostMessage = moduser32.NewProc("PostMessageW") + procWaitMessage = moduser32.NewProc("WaitMessage") + procSetWindowText = moduser32.NewProc("SetWindowTextW") + procGetWindowTextLength = moduser32.NewProc("GetWindowTextLengthW") + procGetWindowText = moduser32.NewProc("GetWindowTextW") + procGetWindowRect = moduser32.NewProc("GetWindowRect") + procMoveWindow = moduser32.NewProc("MoveWindow") + procScreenToClient = moduser32.NewProc("ScreenToClient") + procCallWindowProc = moduser32.NewProc("CallWindowProcW") + procSetWindowLong = moduser32.NewProc("SetWindowLongW") + procSetWindowLongPtr = moduser32.NewProc("SetWindowLongW") + procGetWindowLong = moduser32.NewProc("GetWindowLongW") + procGetWindowLongPtr = moduser32.NewProc("GetWindowLongW") + procEnableWindow = moduser32.NewProc("EnableWindow") + procIsWindowEnabled = moduser32.NewProc("IsWindowEnabled") + procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procSetFocus = moduser32.NewProc("SetFocus") + procInvalidateRect = moduser32.NewProc("InvalidateRect") + procGetClientRect = moduser32.NewProc("GetClientRect") + procGetDC = moduser32.NewProc("GetDC") + procReleaseDC = moduser32.NewProc("ReleaseDC") + procSetCapture = moduser32.NewProc("SetCapture") + procReleaseCapture = moduser32.NewProc("ReleaseCapture") + procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") + procMessageBox = moduser32.NewProc("MessageBoxW") + procGetSystemMetrics = moduser32.NewProc("GetSystemMetrics") + procCopyRect = moduser32.NewProc("CopyRect") + procEqualRect = moduser32.NewProc("EqualRect") + procInflateRect = moduser32.NewProc("InflateRect") + procIntersectRect = moduser32.NewProc("IntersectRect") + procIsRectEmpty = moduser32.NewProc("IsRectEmpty") + procOffsetRect = moduser32.NewProc("OffsetRect") + procPtInRect = moduser32.NewProc("PtInRect") + procSetRect = moduser32.NewProc("SetRect") + procSetRectEmpty = moduser32.NewProc("SetRectEmpty") + procSubtractRect = moduser32.NewProc("SubtractRect") + procUnionRect = moduser32.NewProc("UnionRect") + procCreateDialogParam = moduser32.NewProc("CreateDialogParamW") + procDialogBoxParam = moduser32.NewProc("DialogBoxParamW") + procGetDlgItem = moduser32.NewProc("GetDlgItem") + procDrawIcon = moduser32.NewProc("DrawIcon") + procClientToScreen = moduser32.NewProc("ClientToScreen") + procIsDialogMessage = moduser32.NewProc("IsDialogMessageW") + procIsWindow = moduser32.NewProc("IsWindow") + procEndDialog = moduser32.NewProc("EndDialog") + 
procPeekMessage = moduser32.NewProc("PeekMessageW") + procTranslateAccelerator = moduser32.NewProc("TranslateAcceleratorW") + procSetWindowPos = moduser32.NewProc("SetWindowPos") + procFillRect = moduser32.NewProc("FillRect") + procDrawText = moduser32.NewProc("DrawTextW") + procAddClipboardFormatListener = moduser32.NewProc("AddClipboardFormatListener") + procRemoveClipboardFormatListener = moduser32.NewProc("RemoveClipboardFormatListener") + procOpenClipboard = moduser32.NewProc("OpenClipboard") + procCloseClipboard = moduser32.NewProc("CloseClipboard") + procEnumClipboardFormats = moduser32.NewProc("EnumClipboardFormats") + procGetClipboardData = moduser32.NewProc("GetClipboardData") + procSetClipboardData = moduser32.NewProc("SetClipboardData") + procEmptyClipboard = moduser32.NewProc("EmptyClipboard") + procGetClipboardFormatName = moduser32.NewProc("GetClipboardFormatNameW") + procIsClipboardFormatAvailable = moduser32.NewProc("IsClipboardFormatAvailable") + procBeginPaint = moduser32.NewProc("BeginPaint") + procEndPaint = moduser32.NewProc("EndPaint") + procGetKeyboardState = moduser32.NewProc("GetKeyboardState") + procMapVirtualKey = moduser32.NewProc("MapVirtualKeyExW") + procGetAsyncKeyState = moduser32.NewProc("GetAsyncKeyState") + procToAscii = moduser32.NewProc("ToAscii") + procSwapMouseButton = moduser32.NewProc("SwapMouseButton") + procGetCursorPos = moduser32.NewProc("GetCursorPos") + procSetCursorPos = moduser32.NewProc("SetCursorPos") + procSetCursor = moduser32.NewProc("SetCursor") + procCreateIcon = moduser32.NewProc("CreateIcon") + procDestroyIcon = moduser32.NewProc("DestroyIcon") + procMonitorFromPoint = moduser32.NewProc("MonitorFromPoint") + procMonitorFromRect = moduser32.NewProc("MonitorFromRect") + procMonitorFromWindow = moduser32.NewProc("MonitorFromWindow") + procGetMonitorInfo = moduser32.NewProc("GetMonitorInfoW") + procEnumDisplayMonitors = moduser32.NewProc("EnumDisplayMonitors") + procEnumDisplaySettingsEx = moduser32.NewProc("EnumDisplaySettingsExW") + procChangeDisplaySettingsEx = moduser32.NewProc("ChangeDisplaySettingsExW") + procSendInput = moduser32.NewProc("SendInput") +) + +func RegisterClassEx(wndClassEx *WNDCLASSEX) ATOM { + ret, _, _ := procRegisterClassEx.Call(uintptr(unsafe.Pointer(wndClassEx))) + return ATOM(ret) +} + +func LoadIcon(instance HINSTANCE, iconName *uint16) HICON { + ret, _, _ := procLoadIcon.Call( + uintptr(instance), + uintptr(unsafe.Pointer(iconName))) + + return HICON(ret) + +} + +func LoadCursor(instance HINSTANCE, cursorName *uint16) HCURSOR { + ret, _, _ := procLoadCursor.Call( + uintptr(instance), + uintptr(unsafe.Pointer(cursorName))) + + return HCURSOR(ret) + +} + +func ShowWindow(hwnd HWND, cmdshow int) bool { + ret, _, _ := procShowWindow.Call( + uintptr(hwnd), + uintptr(cmdshow)) + + return ret != 0 + +} + +func UpdateWindow(hwnd HWND) bool { + ret, _, _ := procUpdateWindow.Call( + uintptr(hwnd)) + return ret != 0 +} + +func CreateWindowEx(exStyle uint, className, windowName *uint16, + style uint, x, y, width, height int, parent HWND, menu HMENU, + instance HINSTANCE, param unsafe.Pointer) HWND { + ret, _, _ := procCreateWindowEx.Call( + uintptr(exStyle), + uintptr(unsafe.Pointer(className)), + uintptr(unsafe.Pointer(windowName)), + uintptr(style), + uintptr(x), + uintptr(y), + uintptr(width), + uintptr(height), + uintptr(parent), + uintptr(menu), + uintptr(instance), + uintptr(param)) + + return HWND(ret) +} + +func AdjustWindowRectEx(rect *RECT, style uint, menu bool, exStyle uint) bool { + ret, _, _ := 
procAdjustWindowRectEx.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(style), + uintptr(BoolToBOOL(menu)), + uintptr(exStyle)) + + return ret != 0 +} + +func AdjustWindowRect(rect *RECT, style uint, menu bool) bool { + ret, _, _ := procAdjustWindowRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(style), + uintptr(BoolToBOOL(menu))) + + return ret != 0 +} + +func DestroyWindow(hwnd HWND) bool { + ret, _, _ := procDestroyWindow.Call( + uintptr(hwnd)) + + return ret != 0 +} + +func DefWindowProc(hwnd HWND, msg uint32, wParam, lParam uintptr) uintptr { + ret, _, _ := procDefWindowProc.Call( + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret +} + +func DefDlgProc(hwnd HWND, msg uint32, wParam, lParam uintptr) uintptr { + ret, _, _ := procDefDlgProc.Call( + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret +} + +func PostQuitMessage(exitCode int) { + procPostQuitMessage.Call( + uintptr(exitCode)) +} + +func GetMessage(msg *MSG, hwnd HWND, msgFilterMin, msgFilterMax uint32) int { + ret, _, _ := procGetMessage.Call( + uintptr(unsafe.Pointer(msg)), + uintptr(hwnd), + uintptr(msgFilterMin), + uintptr(msgFilterMax)) + + return int(ret) +} + +func TranslateMessage(msg *MSG) bool { + ret, _, _ := procTranslateMessage.Call( + uintptr(unsafe.Pointer(msg))) + + return ret != 0 + +} + +func DispatchMessage(msg *MSG) uintptr { + ret, _, _ := procDispatchMessage.Call( + uintptr(unsafe.Pointer(msg))) + + return ret + +} + +func SendMessage(hwnd HWND, msg uint32, wParam, lParam uintptr) uintptr { + ret, _, _ := procSendMessage.Call( + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret +} + +func PostMessage(hwnd HWND, msg uint32, wParam, lParam uintptr) bool { + ret, _, _ := procPostMessage.Call( + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret != 0 +} + +func WaitMessage() bool { + ret, _, _ := procWaitMessage.Call() + return ret != 0 +} + +func SetWindowText(hwnd HWND, text string) { + procSetWindowText.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(text)))) +} + +func GetWindowTextLength(hwnd HWND) int { + ret, _, _ := procGetWindowTextLength.Call( + uintptr(hwnd)) + + return int(ret) +} + +func GetWindowText(hwnd HWND) string { + textLen := GetWindowTextLength(hwnd) + 1 + + buf := make([]uint16, textLen) + procGetWindowText.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(textLen)) + + return syscall.UTF16ToString(buf) +} + +func GetWindowRect(hwnd HWND) *RECT { + var rect RECT + procGetWindowRect.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&rect))) + + return &rect +} + +func MoveWindow(hwnd HWND, x, y, width, height int, repaint bool) bool { + ret, _, _ := procMoveWindow.Call( + uintptr(hwnd), + uintptr(x), + uintptr(y), + uintptr(width), + uintptr(height), + uintptr(BoolToBOOL(repaint))) + + return ret != 0 + +} + +func ScreenToClient(hwnd HWND, x, y int) (X, Y int, ok bool) { + pt := POINT{X: int32(x), Y: int32(y)} + ret, _, _ := procScreenToClient.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&pt))) + + return int(pt.X), int(pt.Y), ret != 0 +} + +func CallWindowProc(preWndProc uintptr, hwnd HWND, msg uint32, wParam, lParam uintptr) uintptr { + ret, _, _ := procCallWindowProc.Call( + preWndProc, + uintptr(hwnd), + uintptr(msg), + wParam, + lParam) + + return ret +} + +func SetWindowLong(hwnd HWND, index int, value uint32) uint32 { + ret, _, _ := procSetWindowLong.Call( + uintptr(hwnd), + uintptr(index), + uintptr(value)) + + return uint32(ret) +} + +func SetWindowLongPtr(hwnd 
HWND, index int, value uintptr) uintptr { + ret, _, _ := procSetWindowLongPtr.Call( + uintptr(hwnd), + uintptr(index), + value) + + return ret +} + +func GetWindowLong(hwnd HWND, index int) int32 { + ret, _, _ := procGetWindowLong.Call( + uintptr(hwnd), + uintptr(index)) + + return int32(ret) +} + +func GetWindowLongPtr(hwnd HWND, index int) uintptr { + ret, _, _ := procGetWindowLongPtr.Call( + uintptr(hwnd), + uintptr(index)) + + return ret +} + +func EnableWindow(hwnd HWND, b bool) bool { + ret, _, _ := procEnableWindow.Call( + uintptr(hwnd), + uintptr(BoolToBOOL(b))) + return ret != 0 +} + +func IsWindowEnabled(hwnd HWND) bool { + ret, _, _ := procIsWindowEnabled.Call( + uintptr(hwnd)) + + return ret != 0 +} + +func IsWindowVisible(hwnd HWND) bool { + ret, _, _ := procIsWindowVisible.Call( + uintptr(hwnd)) + + return ret != 0 +} + +func SetFocus(hwnd HWND) HWND { + ret, _, _ := procSetFocus.Call( + uintptr(hwnd)) + + return HWND(ret) +} + +func InvalidateRect(hwnd HWND, rect *RECT, erase bool) bool { + ret, _, _ := procInvalidateRect.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(rect)), + uintptr(BoolToBOOL(erase))) + + return ret != 0 +} + +func GetClientRect(hwnd HWND) *RECT { + var rect RECT + ret, _, _ := procGetClientRect.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&rect))) + + if ret == 0 { + panic(fmt.Sprintf("GetClientRect(%d) failed", hwnd)) + } + + return &rect +} + +func GetDC(hwnd HWND) HDC { + ret, _, _ := procGetDC.Call( + uintptr(hwnd)) + + return HDC(ret) +} + +func ReleaseDC(hwnd HWND, hDC HDC) bool { + ret, _, _ := procReleaseDC.Call( + uintptr(hwnd), + uintptr(hDC)) + + return ret != 0 +} + +func SetCapture(hwnd HWND) HWND { + ret, _, _ := procSetCapture.Call( + uintptr(hwnd)) + + return HWND(ret) +} + +func ReleaseCapture() bool { + ret, _, _ := procReleaseCapture.Call() + + return ret != 0 +} + +func GetWindowThreadProcessId(hwnd HWND) (HANDLE, int) { + var processId int + ret, _, _ := procGetWindowThreadProcessId.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&processId))) + + return HANDLE(ret), processId +} + +func MessageBox(hwnd HWND, title, caption string, flags uint) int { + ret, _, _ := procMessageBox.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(title))), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(caption))), + uintptr(flags)) + + return int(ret) +} + +func GetSystemMetrics(index int) int { + ret, _, _ := procGetSystemMetrics.Call( + uintptr(index)) + + return int(ret) +} + +func CopyRect(dst, src *RECT) bool { + ret, _, _ := procCopyRect.Call( + uintptr(unsafe.Pointer(dst)), + uintptr(unsafe.Pointer(src))) + + return ret != 0 +} + +func EqualRect(rect1, rect2 *RECT) bool { + ret, _, _ := procEqualRect.Call( + uintptr(unsafe.Pointer(rect1)), + uintptr(unsafe.Pointer(rect2))) + + return ret != 0 +} + +func InflateRect(rect *RECT, dx, dy int) bool { + ret, _, _ := procInflateRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(dx), + uintptr(dy)) + + return ret != 0 +} + +func IntersectRect(dst, src1, src2 *RECT) bool { + ret, _, _ := procIntersectRect.Call( + uintptr(unsafe.Pointer(dst)), + uintptr(unsafe.Pointer(src1)), + uintptr(unsafe.Pointer(src2))) + + return ret != 0 +} + +func IsRectEmpty(rect *RECT) bool { + ret, _, _ := procIsRectEmpty.Call( + uintptr(unsafe.Pointer(rect))) + + return ret != 0 +} + +func OffsetRect(rect *RECT, dx, dy int) bool { + ret, _, _ := procOffsetRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(dx), + uintptr(dy)) + + return ret != 0 +} + +func PtInRect(rect *RECT, x, y int) bool 
{ + pt := POINT{X: int32(x), Y: int32(y)} + ret, _, _ := procPtInRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(unsafe.Pointer(&pt))) + + return ret != 0 +} + +func SetRect(rect *RECT, left, top, right, bottom int) bool { + ret, _, _ := procSetRect.Call( + uintptr(unsafe.Pointer(rect)), + uintptr(left), + uintptr(top), + uintptr(right), + uintptr(bottom)) + + return ret != 0 +} + +func SetRectEmpty(rect *RECT) bool { + ret, _, _ := procSetRectEmpty.Call( + uintptr(unsafe.Pointer(rect))) + + return ret != 0 +} + +func SubtractRect(dst, src1, src2 *RECT) bool { + ret, _, _ := procSubtractRect.Call( + uintptr(unsafe.Pointer(dst)), + uintptr(unsafe.Pointer(src1)), + uintptr(unsafe.Pointer(src2))) + + return ret != 0 +} + +func UnionRect(dst, src1, src2 *RECT) bool { + ret, _, _ := procUnionRect.Call( + uintptr(unsafe.Pointer(dst)), + uintptr(unsafe.Pointer(src1)), + uintptr(unsafe.Pointer(src2))) + + return ret != 0 +} + +func CreateDialog(hInstance HINSTANCE, lpTemplate *uint16, hWndParent HWND, lpDialogProc uintptr) HWND { + ret, _, _ := procCreateDialogParam.Call( + uintptr(hInstance), + uintptr(unsafe.Pointer(lpTemplate)), + uintptr(hWndParent), + lpDialogProc, + 0) + + return HWND(ret) +} + +func DialogBox(hInstance HINSTANCE, lpTemplateName *uint16, hWndParent HWND, lpDialogProc uintptr) int { + ret, _, _ := procDialogBoxParam.Call( + uintptr(hInstance), + uintptr(unsafe.Pointer(lpTemplateName)), + uintptr(hWndParent), + lpDialogProc, + 0) + + return int(ret) +} + +func GetDlgItem(hDlg HWND, nIDDlgItem int) HWND { + ret, _, _ := procGetDlgItem.Call( + uintptr(unsafe.Pointer(hDlg)), + uintptr(nIDDlgItem)) + + return HWND(ret) +} + +func DrawIcon(hDC HDC, x, y int, hIcon HICON) bool { + ret, _, _ := procDrawIcon.Call( + uintptr(unsafe.Pointer(hDC)), + uintptr(x), + uintptr(y), + uintptr(unsafe.Pointer(hIcon))) + + return ret != 0 +} + +func ClientToScreen(hwnd HWND, x, y int) (int, int) { + pt := POINT{X: int32(x), Y: int32(y)} + + procClientToScreen.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(&pt))) + + return int(pt.X), int(pt.Y) +} + +func IsDialogMessage(hwnd HWND, msg *MSG) bool { + ret, _, _ := procIsDialogMessage.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(msg))) + + return ret != 0 +} + +func IsWindow(hwnd HWND) bool { + ret, _, _ := procIsWindow.Call( + uintptr(hwnd)) + + return ret != 0 +} + +func EndDialog(hwnd HWND, nResult uintptr) bool { + ret, _, _ := procEndDialog.Call( + uintptr(hwnd), + nResult) + + return ret != 0 +} + +func PeekMessage(lpMsg *MSG, hwnd HWND, wMsgFilterMin, wMsgFilterMax, wRemoveMsg uint32) bool { + ret, _, _ := procPeekMessage.Call( + uintptr(unsafe.Pointer(lpMsg)), + uintptr(hwnd), + uintptr(wMsgFilterMin), + uintptr(wMsgFilterMax), + uintptr(wRemoveMsg)) + + return ret != 0 +} + +func TranslateAccelerator(hwnd HWND, hAccTable HACCEL, lpMsg *MSG) bool { + ret, _, _ := procTranslateMessage.Call( + uintptr(hwnd), + uintptr(hAccTable), + uintptr(unsafe.Pointer(lpMsg))) + + return ret != 0 +} + +func SetWindowPos(hwnd, hWndInsertAfter HWND, x, y, cx, cy int, uFlags uint) bool { + ret, _, _ := procSetWindowPos.Call( + uintptr(hwnd), + uintptr(hWndInsertAfter), + uintptr(x), + uintptr(y), + uintptr(cx), + uintptr(cy), + uintptr(uFlags)) + + return ret != 0 +} + +func FillRect(hDC HDC, lprc *RECT, hbr HBRUSH) bool { + ret, _, _ := procFillRect.Call( + uintptr(hDC), + uintptr(unsafe.Pointer(lprc)), + uintptr(hbr)) + + return ret != 0 +} + +func DrawText(hDC HDC, text string, uCount int, lpRect *RECT, uFormat uint) int { + ret, _, _ := 
procDrawText.Call( + uintptr(hDC), + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(text))), + uintptr(uCount), + uintptr(unsafe.Pointer(lpRect)), + uintptr(uFormat)) + + return int(ret) +} + +func AddClipboardFormatListener(hwnd HWND) bool { + ret, _, _ := procAddClipboardFormatListener.Call( + uintptr(hwnd)) + return ret != 0 +} + +func RemoveClipboardFormatListener(hwnd HWND) bool { + ret, _, _ := procRemoveClipboardFormatListener.Call( + uintptr(hwnd)) + return ret != 0 +} + +func OpenClipboard(hWndNewOwner HWND) bool { + ret, _, _ := procOpenClipboard.Call( + uintptr(hWndNewOwner)) + return ret != 0 +} + +func CloseClipboard() bool { + ret, _, _ := procCloseClipboard.Call() + return ret != 0 +} + +func EnumClipboardFormats(format uint) uint { + ret, _, _ := procEnumClipboardFormats.Call( + uintptr(format)) + return uint(ret) +} + +func GetClipboardData(uFormat uint) HANDLE { + ret, _, _ := procGetClipboardData.Call( + uintptr(uFormat)) + return HANDLE(ret) +} + +func SetClipboardData(uFormat uint, hMem HANDLE) HANDLE { + ret, _, _ := procSetClipboardData.Call( + uintptr(uFormat), + uintptr(hMem)) + return HANDLE(ret) +} + +func EmptyClipboard() bool { + ret, _, _ := procEmptyClipboard.Call() + return ret != 0 +} + +func GetClipboardFormatName(format uint) (string, bool) { + cchMaxCount := 255 + buf := make([]uint16, cchMaxCount) + ret, _, _ := procGetClipboardFormatName.Call( + uintptr(format), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(cchMaxCount)) + + if ret > 0 { + return syscall.UTF16ToString(buf), true + } + + return "Requested format does not exist or is predefined", false +} + +func IsClipboardFormatAvailable(format uint) bool { + ret, _, _ := procIsClipboardFormatAvailable.Call(uintptr(format)) + return ret != 0 +} + +func BeginPaint(hwnd HWND, paint *PAINTSTRUCT) HDC { + ret, _, _ := procBeginPaint.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(paint))) + return HDC(ret) +} + +func EndPaint(hwnd HWND, paint *PAINTSTRUCT) { + procBeginPaint.Call( + uintptr(hwnd), + uintptr(unsafe.Pointer(paint))) +} + +func GetKeyboardState(lpKeyState *[]byte) bool { + ret, _, _ := procGetKeyboardState.Call( + uintptr(unsafe.Pointer(&(*lpKeyState)[0]))) + return ret != 0 +} + +func MapVirtualKeyEx(uCode, uMapType uint, dwhkl HKL) uint { + ret, _, _ := procMapVirtualKey.Call( + uintptr(uCode), + uintptr(uMapType), + uintptr(dwhkl)) + return uint(ret) +} + +func GetAsyncKeyState(vKey int) uint16 { + ret, _, _ := procGetAsyncKeyState.Call(uintptr(vKey)) + return uint16(ret) +} + +func ToAscii(uVirtKey, uScanCode uint, lpKeyState *byte, lpChar *uint16, uFlags uint) int { + ret, _, _ := procToAscii.Call( + uintptr(uVirtKey), + uintptr(uScanCode), + uintptr(unsafe.Pointer(lpKeyState)), + uintptr(unsafe.Pointer(lpChar)), + uintptr(uFlags)) + return int(ret) +} + +func SwapMouseButton(fSwap bool) bool { + ret, _, _ := procSwapMouseButton.Call( + uintptr(BoolToBOOL(fSwap))) + return ret != 0 +} + +func GetCursorPos() (x, y int, ok bool) { + pt := POINT{} + ret, _, _ := procGetCursorPos.Call(uintptr(unsafe.Pointer(&pt))) + return int(pt.X), int(pt.Y), ret != 0 +} + +func SetCursorPos(x, y int) bool { + ret, _, _ := procSetCursorPos.Call( + uintptr(x), + uintptr(y), + ) + return ret != 0 +} + +func SetCursor(cursor HCURSOR) HCURSOR { + ret, _, _ := procSetCursor.Call( + uintptr(cursor), + ) + return HCURSOR(ret) +} + +func CreateIcon(instance HINSTANCE, nWidth, nHeight int, cPlanes, cBitsPerPixel byte, ANDbits, XORbits *byte) HICON { + ret, _, _ := procCreateIcon.Call( + uintptr(instance), + 
uintptr(nWidth), + uintptr(nHeight), + uintptr(cPlanes), + uintptr(cBitsPerPixel), + uintptr(unsafe.Pointer(ANDbits)), + uintptr(unsafe.Pointer(XORbits)), + ) + return HICON(ret) +} + +func DestroyIcon(icon HICON) bool { + ret, _, _ := procDestroyIcon.Call( + uintptr(icon), + ) + return ret != 0 +} + +func MonitorFromPoint(x, y int, dwFlags uint32) HMONITOR { + ret, _, _ := procMonitorFromPoint.Call( + uintptr(x), + uintptr(y), + uintptr(dwFlags), + ) + return HMONITOR(ret) +} + +func MonitorFromRect(rc *RECT, dwFlags uint32) HMONITOR { + ret, _, _ := procMonitorFromRect.Call( + uintptr(unsafe.Pointer(rc)), + uintptr(dwFlags), + ) + return HMONITOR(ret) +} + +func MonitorFromWindow(hwnd HWND, dwFlags uint32) HMONITOR { + ret, _, _ := procMonitorFromWindow.Call( + uintptr(hwnd), + uintptr(dwFlags), + ) + return HMONITOR(ret) +} + +func GetMonitorInfo(hMonitor HMONITOR, lmpi *MONITORINFO) bool { + ret, _, _ := procGetMonitorInfo.Call( + uintptr(hMonitor), + uintptr(unsafe.Pointer(lmpi)), + ) + return ret != 0 +} + +func EnumDisplayMonitors(hdc HDC, clip *RECT, fnEnum, dwData uintptr) bool { + ret, _, _ := procEnumDisplayMonitors.Call( + uintptr(hdc), + uintptr(unsafe.Pointer(clip)), + fnEnum, + dwData, + ) + return ret != 0 +} + +func EnumDisplaySettingsEx(szDeviceName *uint16, iModeNum uint32, devMode *DEVMODE, dwFlags uint32) bool { + ret, _, _ := procEnumDisplaySettingsEx.Call( + uintptr(unsafe.Pointer(szDeviceName)), + uintptr(iModeNum), + uintptr(unsafe.Pointer(devMode)), + uintptr(dwFlags), + ) + return ret != 0 +} + +func ChangeDisplaySettingsEx(szDeviceName *uint16, devMode *DEVMODE, hwnd HWND, dwFlags uint32, lParam uintptr) int32 { + ret, _, _ := procChangeDisplaySettingsEx.Call( + uintptr(unsafe.Pointer(szDeviceName)), + uintptr(unsafe.Pointer(devMode)), + uintptr(hwnd), + uintptr(dwFlags), + lParam, + ) + return int32(ret) +} + +/* remove to build without cgo +func SendInput(inputs []INPUT) uint32 { + var validInputs []C.INPUT + + for _, oneInput := range inputs { + input := C.INPUT{_type: C.DWORD(oneInput.Type)} + + switch oneInput.Type { + case INPUT_MOUSE: + (*MouseInput)(unsafe.Pointer(&input)).mi = oneInput.Mi + case INPUT_KEYBOARD: + (*KbdInput)(unsafe.Pointer(&input)).ki = oneInput.Ki + case INPUT_HARDWARE: + (*HardwareInput)(unsafe.Pointer(&input)).hi = oneInput.Hi + default: + panic("unkown type") + } + + validInputs = append(validInputs, input) + } + + ret, _, _ := procSendInput.Call( + uintptr(len(validInputs)), + uintptr(unsafe.Pointer(&validInputs[0])), + uintptr(unsafe.Sizeof(C.INPUT{})), + ) + return uint32(ret) +} +*/ diff --git a/vendor/github.com/shirou/w32/utils.go b/vendor/github.com/shirou/w32/utils.go new file mode 100644 index 0000000..69aa31a --- /dev/null +++ b/vendor/github.com/shirou/w32/utils.go @@ -0,0 +1,203 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package w32 + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +func MakeIntResource(id uint16) *uint16 { + return (*uint16)(unsafe.Pointer(uintptr(id))) +} + +func LOWORD(dw uint32) uint16 { + return uint16(dw) +} + +func HIWORD(dw uint32) uint16 { + return uint16(dw >> 16 & 0xffff) +} + +func BoolToBOOL(value bool) BOOL { + if value { + return 1 + } + + return 0 +} + +func UTF16PtrToString(cstr *uint16) string { + if cstr != nil { + us := make([]uint16, 0, 256) + for p := uintptr(unsafe.Pointer(cstr)); ; p += 2 { + u := *(*uint16)(unsafe.Pointer(p)) + if u == 0 { + return string(utf16.Decode(us)) + } + us = append(us, u) + } + } + + return "" +} + +func ComAddRef(unknown *IUnknown) int32 { + ret, _, _ := syscall.Syscall(unknown.lpVtbl.pAddRef, 1, + uintptr(unsafe.Pointer(unknown)), + 0, + 0) + return int32(ret) +} + +func ComRelease(unknown *IUnknown) int32 { + ret, _, _ := syscall.Syscall(unknown.lpVtbl.pRelease, 1, + uintptr(unsafe.Pointer(unknown)), + 0, + 0) + return int32(ret) +} + +func ComQueryInterface(unknown *IUnknown, id *GUID) *IDispatch { + var disp *IDispatch + hr, _, _ := syscall.Syscall(unknown.lpVtbl.pQueryInterface, 3, + uintptr(unsafe.Pointer(unknown)), + uintptr(unsafe.Pointer(id)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + panic("Invoke QieryInterface error.") + } + return disp +} + +func ComGetIDsOfName(disp *IDispatch, names []string) []int32 { + wnames := make([]*uint16, len(names)) + dispid := make([]int32, len(names)) + for i := 0; i < len(names); i++ { + wnames[i] = syscall.StringToUTF16Ptr(names[i]) + } + hr, _, _ := syscall.Syscall6(disp.lpVtbl.pGetIDsOfNames, 6, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(unsafe.Pointer(&wnames[0])), + uintptr(len(names)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&dispid[0]))) + if hr != 0 { + panic("Invoke GetIDsOfName error.") + } + return dispid +} + +func ComInvoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT) { + var dispparams DISPPARAMS + + if dispatch&DISPATCH_PROPERTYPUT != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.RgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.CNamedArgs = 1 + } + var vargs []VARIANT + if len(params) > 0 { + vargs = make([]VARIANT, len(params)) + for i, v := range params { + //n := len(params)-i-1 + n := len(params) - i - 1 + VariantInit(&vargs[n]) + switch v.(type) { + case bool: + if v.(bool) { + vargs[n] = VARIANT{VT_BOOL, 0, 0, 0, 0xffff} + } else { + vargs[n] = VARIANT{VT_BOOL, 0, 0, 0, 0} + } + case *bool: + vargs[n] = VARIANT{VT_BOOL | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*bool))))} + case byte: + vargs[n] = VARIANT{VT_I1, 0, 0, 0, int64(v.(byte))} + case *byte: + vargs[n] = VARIANT{VT_I1 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*byte))))} + case int16: + vargs[n] = VARIANT{VT_I2, 0, 0, 0, int64(v.(int16))} + case *int16: + vargs[n] = VARIANT{VT_I2 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*int16))))} + case uint16: + vargs[n] = VARIANT{VT_UI2, 0, 0, 0, int64(v.(int16))} + case *uint16: + vargs[n] = VARIANT{VT_UI2 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*uint16))))} + case int, int32: + vargs[n] = VARIANT{VT_UI4, 0, 0, 0, int64(v.(int))} + case *int, *int32: + vargs[n] = VARIANT{VT_I4 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*int))))} + case uint, uint32: + vargs[n] = VARIANT{VT_UI4, 0, 0, 0, int64(v.(uint))} + case *uint, *uint32: + vargs[n] = 
VARIANT{VT_UI4 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*uint))))} + case int64: + vargs[n] = VARIANT{VT_I8, 0, 0, 0, v.(int64)} + case *int64: + vargs[n] = VARIANT{VT_I8 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*int64))))} + case uint64: + vargs[n] = VARIANT{VT_UI8, 0, 0, 0, int64(v.(uint64))} + case *uint64: + vargs[n] = VARIANT{VT_UI8 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*uint64))))} + case float32: + vargs[n] = VARIANT{VT_R4, 0, 0, 0, int64(v.(float32))} + case *float32: + vargs[n] = VARIANT{VT_R4 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*float32))))} + case float64: + vargs[n] = VARIANT{VT_R8, 0, 0, 0, int64(v.(float64))} + case *float64: + vargs[n] = VARIANT{VT_R8 | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*float64))))} + case string: + vargs[n] = VARIANT{VT_BSTR, 0, 0, 0, int64(uintptr(unsafe.Pointer(SysAllocString(v.(string)))))} + case *string: + vargs[n] = VARIANT{VT_BSTR | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*string))))} + case *IDispatch: + vargs[n] = VARIANT{VT_DISPATCH, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))} + case **IDispatch: + vargs[n] = VARIANT{VT_DISPATCH | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))} + case nil: + vargs[n] = VARIANT{VT_NULL, 0, 0, 0, 0} + case *VARIANT: + vargs[n] = VARIANT{VT_VARIANT | VT_BYREF, 0, 0, 0, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))} + default: + panic("unknown type") + } + } + dispparams.Rgvarg = uintptr(unsafe.Pointer(&vargs[0])) + dispparams.CArgs = uint32(len(params)) + } + + var ret VARIANT + var excepInfo EXCEPINFO + VariantInit(&ret) + hr, _, _ := syscall.Syscall9(disp.lpVtbl.pInvoke, 8, + uintptr(unsafe.Pointer(disp)), + uintptr(dispid), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(GetUserDefaultLCID()), + uintptr(dispatch), + uintptr(unsafe.Pointer(&dispparams)), + uintptr(unsafe.Pointer(&ret)), + uintptr(unsafe.Pointer(&excepInfo)), + 0) + if hr != 0 { + if excepInfo.BstrDescription != nil { + bs := UTF16PtrToString(excepInfo.BstrDescription) + panic(bs) + } + } + for _, varg := range vargs { + if varg.VT == VT_BSTR && varg.Val != 0 { + SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) + } + } + result = &ret + return +} diff --git a/vendor/github.com/shirou/w32/vars.go b/vendor/github.com/shirou/w32/vars.go new file mode 100644 index 0000000..2dab2e3 --- /dev/null +++ b/vendor/github.com/shirou/w32/vars.go @@ -0,0 +1,13 @@ +// Copyright 2010-2012 The W32 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package w32 + +var ( + IID_NULL = &GUID{0x00000000, 0x0000, 0x0000, [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}} + IID_IUnknown = &GUID{0x00000000, 0x0000, 0x0000, [8]byte{0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}} + IID_IDispatch = &GUID{0x00020400, 0x0000, 0x0000, [8]byte{0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}} + IID_IConnectionPointContainer = &GUID{0xB196B284, 0xBAB4, 0x101A, [8]byte{0xB6, 0x9C, 0x00, 0xAA, 0x00, 0x34, 0x1D, 0x07}} + IID_IConnectionPoint = &GUID{0xB196B286, 0xBAB4, 0x101A, [8]byte{0xB6, 0x9C, 0x00, 0xAA, 0x00, 0x34, 0x1D, 0x07}} +) diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE new file mode 100644 index 0000000..95a0f05 --- /dev/null +++ b/vendor/github.com/ugorji/go/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go new file mode 100644 index 0000000..209f9eb --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/0doc.go @@ -0,0 +1,199 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go codec/encoding library for +binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package understands the 'unsafe' tag, to allow using unsafe semantics: + + - When decoding into a struct, you need to read the field name as a string + so you can find the struct field it is mapped to. + Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion. + +To install using unsafe, pass the 'unsafe' tag: + + go get -tags=unsafe github.com/ugorji/go/codec + +For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Multiple conversions: + Package coerces types where appropriate + e.g. decode an int in the stream into a float, etc. + - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Decoding without a schema (into a interface{}). 
+ Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Encode a struct as an array, and decode struct from an array in the data stream + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. + User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. + - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. + +The Encoder and Decoder are NOT safe for concurrent use. + +Consequently, the usage model is basically: + + - Create and initialize the Handle before any use. + Once created, DO NOT modify it. + - Multiple Encoders or Decoders can now use the Handle concurrently. + They only read information off the Handle (never write). + - However, each Encoder or Decoder MUST not be used concurrently + - To re-use an Encoder/Decoder, call Reset(...) on it first. + This allows you use state maintained on the Encoder/Decoder. + +Sample usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +*/ +package codec + +// Benefits of go-codec: +// +// - encoding/json always reads whole file into memory first. +// This makes it unsuitable for parsing very large files. +// - encoding/xml cannot parse into a map[string]interface{} +// I found this out on reading https://github.com/clbanning/mxj + +// TODO: +// +// - optimization for codecgen: +// if len of entity is <= 3 words, then support a value receiver for encode. +// - (En|De)coder should store an error when it occurs. +// Until reset, subsequent calls return that error that was stored. +// This means that free panics must go away. +// All errors must be raised through errorf method. +// - Decoding using a chan is good, but incurs concurrency costs. +// This is because there's no fast way to use a channel without it +// having to switch goroutines constantly. +// Callback pattern is still the best. Maybe consider supporting something like: +// type X struct { +// Name string +// Ys []Y +// Ys chan <- Y +// Ys func(Y) -> call this function for each entry +// } +// - Consider adding a isZeroer interface { isZero() bool } +// It is used within isEmpty, for omitEmpty support. +// - Consider making Handle used AS-IS within the encoding/decoding session. +// This means that we don't cache Handle information within the (En|De)coder, +// except we really need it at Reset(...) +// - Consider adding math/big support +// - Consider reducing the size of the generated functions: +// Maybe use one loop, and put the conditionals in the loop. +// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } } diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md new file mode 100644 index 0000000..91cb3a2 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/README.md @@ -0,0 +1,148 @@ +# Codec + +High Performance, Feature-Rich Idiomatic Go codec/encoding library for +binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package understands the `unsafe` tag, to allow using unsafe semantics: + + - When decoding into a struct, you need to read the field name as a string + so you can find the struct field it is mapped to. + Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion. 
+ +To use it, you must pass the `unsafe` tag during install: + +``` +go install -tags=unsafe github.com/ugorji/go/codec +``` + +Online documentation: http://godoc.org/github.com/ugorji/go/codec +Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Multiple conversions: + Package coerces types where appropriate + e.g. decode an int in the stream into a float, etc. + - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Decoding without a schema (into a interface{}). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Encode a struct as an array, and decode struct from an array in the data stream + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. + User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +## Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. 
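+
+Tying the Extension Support section above to code, the following is a minimal
+sketch of registering a custom type with a binary handle. It assumes the
+`BytesExt` interface takes `WriteExt(v interface{}) []byte` and
+`ReadExt(dst interface{}, src []byte)` (as the driver code in this package
+suggests), and uses an illustrative `UUID` type with an arbitrary,
+application-chosen tag value:
+
+    // UUID is an example named type; uuidExt converts it to/from raw bytes.
+    type UUID [16]byte
+
+    type uuidExt struct{}
+
+    func (uuidExt) WriteExt(v interface{}) []byte {
+        // the codec may hand us the value or a pointer to it
+        switch u := v.(type) {
+        case UUID:
+            return u[:]
+        case *UUID:
+            return u[:]
+        }
+        panic("unexpected type for UUID extension")
+    }
+
+    func (uuidExt) ReadExt(dst interface{}, src []byte) {
+        u := dst.(*UUID)
+        copy(u[:], src)
+    }
+
+    var bh codec.BincHandle
+    // 78 is an arbitrary tag value chosen by the application
+    if err := bh.SetBytesExt(reflect.TypeOf(UUID{}), 78, uuidExt{}); err != nil {
+        panic(err)
+    }
+
+With such a registration in place, Encoders and Decoders created from `bh`
+should write and read `UUID` values as a tagged 16-byte extension instead of
+reflecting over the underlying array.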
+ +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go new file mode 100644 index 0000000..bdb2d5f --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -0,0 +1,929 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. + +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + e *Encoder + w encWriter + m map[string]uint16 // symbols + b [scratchByteArrayLen]byte + s uint16 // symbols sequencer + encNoSeparator +} + +func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { + if rt == timeTypId { + var bs []byte + switch x := v.(type) { + case time.Time: + bs = encodeTime(x) + case *time.Time: + bs = encodeTime(*x) + default: + e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) + } + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) EncodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) EncodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) EncodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + 
return + } + bigen.PutUint64(e.b[:8], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:8]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) EncodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + if v >= 0 { + e.encUint(bincVdPosInt<<4, true, uint64(v)) + } else if v == -1 { + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + } else { + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) EncodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + if v == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + } else if pos && v >= 1 && v <= 16 { + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + } else if v <= math.MaxUint8 { + e.w.writen2(bd|0x0, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.encIntegerPrune(bd, pos, v, 4) + } else { + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) EncodeArrayStart(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) EncodeMapStart(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) EncodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). 
+ + l := len(v) + if l == 0 { + e.encBytesLen(c_UTF8, 0) + return + } else if l == 1 { + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + } else { + e.s++ + ui = e.s + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + if l <= math.MaxUint8 { + // lenprec = 0 + } else if l <= math.MaxUint16 { + lenprec = 1 + } else if int64(l) <= math.MaxUint32 { + lenprec = 2 + } else { + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + if lenprec == 0 { + e.w.writen1(byte(l)) + } else if lenprec == 1 { + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) + } else if lenprec == 2 { + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) + } else { + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd | 0x02) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { + e.w.writen1(bd | 0x03) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecSymbol struct { + s string + b []byte + i uint16 +} + +type bincDecDriver struct { + d *Decoder + h *BincHandle + r decReader + br bool // bytes reader + bdRead bool + bd byte + vd byte + vs byte + noStreamingCodec + decNoSeparator + b [scratchByteArrayLen]byte + + // linear searching on this slice is ok, + // because we typically expect < 32 symbols in each stream. 
+ s []bincDecSymbol +} + +func (d *bincDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true +} + +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *bincDecDriver) ContainerType() (vt valueType) { + if d.vd == bincVdSpecial && d.vs == bincSpNil { + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *bincDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { + if !d.bdRead { + d.readNextBd() + } + if rt == timeTypId { + if d.vd != bincVdTimestamp { + d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + return + } + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) + return + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } + if x := d.vs & 0x7; x == bincFlBin32 { + d.decFloatPre(d.vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + } else if x == bincFlBin64 { + d.decFloatPre(d.vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + } else { + d.d.errorf("only float32 and float64 are supported. 
d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + return + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 3: + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:8]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:8])) + case 7: + d.r.readb(d.b[:8]) + v = uint64(bigen.Uint64(d.b[:8])) + default: + d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") + return + } + return +} + +func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { + ui = d.decUint() + } else if vd == bincVdNegInt { + ui = d.decUint() + neg = true + } else if vd == bincVdSmallInt { + ui = uint64(d.vs) + 1 + } else if vd == bincVdSpecial { + if vs == bincSpZero { + //i = 0 + } else if vs == bincSpNegOne { + neg = true + ui = 1 + } else { + d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) + return + } + } else { + d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + return + } + return +} + +func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("binc: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("binc: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { + d.bdRead = false + if vs == bincSpNan { + return math.NaN() + } else if vs == bincSpPosInf { + return math.Inf(1) + } else if vs == bincSpZeroFloat || vs == bincSpZero { + return + } else if vs == bincSpNegInf { + return math.Inf(-1) + } else { + d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + return + } + } else if vd == bincVdFloat { + f = d.decFloat() + } else { + f = float64(d.DecodeInt(64)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("binc: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { + // b = false + } else if bd == (bincVdSpecial | bincSpTrue) { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadMapStart() (length int) { + if d.vd != bincVdMap { + d.d.errorf("Invalid d.vd for map. Expecting 0x%x. 
Got: 0x%x", bincVdMap, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadArrayStart() (length int) { + if d.vd != bincVdArray { + d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs > 3 { + return int(d.vs - 4) + } + return int(d.decLenNumber()) +} + +func (d *bincDecDriver) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + } else if x == 2 { + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + } else { + d.r.readb(d.b[:8]) + v = bigen.Uint64(d.b[:8]) + } + return +} + +func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + var slen int = -1 + // var ok bool + switch d.vd { + case bincVdString, bincVdByteArray: + slen = d.decLen() + if zerocopy { + if d.br { + bs2 = d.r.readx(slen) + } else if len(bs) == 0 { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:]) + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + if withString { + s = string(bs2) + } + case bincVdSymbol: + // zerocopy doesn't apply for symbols, + // as the values must be stored in a table for later use. + // + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint16 + vs := d.vs + if vs&0x8 == 0 { + symbol = uint16(d.r.readn1()) + } else { + symbol = uint16(bigen.Uint16(d.r.readx(2))) + } + if d.s == nil { + d.s = make([]bincDecSymbol, 0, 16) + } + + if vs&0x4 == 0 { + for i := range d.s { + j := &d.s[i] + if j.i == symbol { + bs2 = j.b + if withString { + if j.s == "" && bs2 != nil { + j.s = string(bs2) + } + s = j.s + } + break + } + } + } else { + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(bigen.Uint16(d.r.readx(2))) + case 2: + slen = int(bigen.Uint32(d.r.readx(4))) + case 3: + slen = int(bigen.Uint64(d.r.readx(8))) + } + // since using symbols, do not store any part of + // the parameter bs in the map, as it might be a shared buffer. + // bs2 = decByteSlice(d.r, slen, bs) + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil) + if withString { + s = string(bs2) + } + d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) + } + default: + d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeString() (s string) { + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. + // Use decStringAndBytes directly. 
+ // return string(d.DecodeBytes(d.b[:], true, true)) + _, s = d.decStringAndBytes(d.b[:], true, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if isstring { + bsOut, _ = d.decStringAndBytes(bs, false, zerocopy) + return + } + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return nil + } + var clen int + if d.vd == bincVdString || d.vd == bincVdByteArray { + clen = d.decLen() + } else { + d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + return + } + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdCustomExt { + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + } else if d.vd == bincVdByteArray { + xbs = d.DecodeBytes(nil, false, true) + } else { + d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + n.v = valueTypeNil + case bincSpFalse: + n.v = valueTypeBool + n.b = false + case bincSpTrue: + n.v = valueTypeBool + n.b = true + case bincSpNan: + n.v = valueTypeFloat + n.f = math.NaN() + case bincSpPosInf: + n.v = valueTypeFloat + n.f = math.Inf(1) + case bincSpNegInf: + n.v = valueTypeFloat + n.f = math.Inf(-1) + case bincSpZeroFloat: + n.v = valueTypeFloat + n.f = float64(0) + case bincSpZero: + n.v = valueTypeUint + n.u = uint64(0) // int8(0) + case bincSpNegOne: + n.v = valueTypeInt + n.i = int64(-1) // int8(-1) + default: + d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + n.v = valueTypeUint + n.u = d.decUint() + case bincVdNegInt: + n.v = valueTypeInt + n.i = -(int64(d.decUint())) + case bincVdFloat: + n.v = valueTypeFloat + n.f = d.decFloat() + case bincVdSymbol: + n.v = valueTypeSymbol + n.s = d.DecodeString() + case bincVdString: + n.v = valueTypeString + n.s = d.DecodeString() + case bincVdByteArray: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case bincVdTimestamp: + n.v = valueTypeTimestamp + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + n.t = tt + case bincVdCustomExt: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case bincVdArray: + n.v = valueTypeArray + decodeFurther = true + case bincVdMap: + n.v = valueTypeMap + decodeFurther = true + 
default: + d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. +type BincHandle struct { + BasicHandle + binaryEncodingType +} + +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *BincHandle) newEncDriver(e *Encoder) encDriver { + return &bincEncDriver{e: e, w: e.w} +} + +func (h *BincHandle) newDecDriver(d *Decoder) decDriver { + return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *bincEncDriver) reset() { + e.w = e.e.w + e.s = 0 + e.m = nil +} + +func (d *bincDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.s = nil + d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 +} + +var _ decDriver = (*bincDecDriver)(nil) +var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go new file mode 100644 index 0000000..ce3e0e0 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -0,0 +1,592 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "math" + "reflect" +) + +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorText + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorOther +) + +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString = 0x7f + cborBdIndefiniteArray = 0x9f + cborBdIndefiniteMap = 0xbf + cborBdBreak = 0xff +) + +const ( + CborStreamBytes byte = 0x5f + CborStreamString = 0x7f + CborStreamArray = 0x9f + CborStreamMap = 0xbf + CborStreamBreak = 0xff +) + +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt = 0x20 + cborBaseBytes = 0x40 + cborBaseString = 0x60 + cborBaseArray = 0x80 + cborBaseMap = 0xa0 + cborBaseTag = 0xc0 + cborBaseSimple = 0xe0 +) + +// ------------------- + +type cborEncDriver struct { + noBuiltInTypes + encNoSeparator + e *Encoder + w encWriter + h *CborHandle + x [8]byte +} + +func (e *cborEncDriver) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriver) EncodeFloat32(f float32) { + e.w.writen1(cborBdFloat32) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *cborEncDriver) EncodeFloat64(f float64) { + e.w.writen1(cborBdFloat64) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *cborEncDriver) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 0x1b) + bigenHelper{e.x[:8], e.w}.writeUint64(v) + } +} + +func (e *cborEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriver) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriver) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + e.encUint(uint64(xtag), cborBaseTag) + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + e.encUint(uint64(re.Tag), cborBaseTag) + if re.Data != nil { + en.encode(re.Data) + } else if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *cborEncDriver) EncodeArrayStart(length int) { + e.encLen(cborBaseArray, length) +} + +func (e *cborEncDriver) EncodeMapStart(length int) { + e.encLen(cborBaseMap, length) +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encLen(cborBaseString, len(v)) + e.w.writestr(v) +} + +func (e *cborEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if c == c_RAW { + e.encLen(cborBaseBytes, len(v)) + } else { + e.encLen(cborBaseString, len(v)) + } + e.w.writeb(v) +} + +// ---------------------- + +type cborDecDriver struct { + d *Decoder + h *CborHandle + r decReader + b [scratchByteArrayLen]byte + br bool // bytes reader + bdRead 
bool + bd byte + noBuiltInTypes + decNoSeparator +} + +func (d *cborDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *cborDecDriver) ContainerType() (vt valueType) { + if d.bd == cborBdNil { + return valueTypeNil + } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { + return valueTypeBytes + } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { + return valueTypeString + } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + return valueTypeArray + } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *cborDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + // treat Nil and Undefined as nil values + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) CheckBreak() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else { + if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readx(2))) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readx(4))) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readx(8))) + } else { + d.d.errorf("decUint: Invalid descriptor: %v", d.bd) + return + } + } + return +} + +func (d *cborDecDriver) decCheckInteger() (neg bool) { + if !d.bdRead { + d.readNextBd() + } + major := d.bd >> 5 + if major == cborMajorUint { + } else if major == cborMajorNegInt { + neg = true + } else { + d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) + return + } + return +} + +func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) { + neg := d.decCheckInteger() + ui := d.decUint() + // check if this number can be converted to an int without overflow + var overflow bool + if neg { + if i, overflow = chkOvf.SignedInt(ui + 1); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) + return + } + i = -i + } else { + if i, overflow = chkOvf.SignedInt(ui); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui) + return + } + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("cbor: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if d.decCheckInteger() { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + ui = d.decUint() + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("cbor: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdFloat16 { + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) + } else if bd == cborBdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if bd == cborBdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else if bd >= cborBaseUint && bd < cborBaseBytes { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float 
only valid from float16/32/64: Invalid descriptor: %v", bd) + return + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("cbor: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *cborDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdTrue { + b = true + } else if bd == cborBdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) ReadMapStart() (length int) { + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) ReadArrayStart() (length int) { + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { + d.bdRead = false + for { + if d.CheckBreak() { + break + } + if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { + d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) + return nil + } + n := d.decLen() + oldLen := len(bs) + newLen := oldLen + n + if newLen > cap(bs) { + bs2 := make([]byte, newLen, 2*cap(bs)+n) + copy(bs2, bs) + bs = bs2 + } else { + bs = bs[:newLen] + } + d.r.readb(bs[oldLen:newLen]) + // bs = append(bs, d.r.readn()...) + d.bdRead = false + } + d.bdRead = false + return bs +} + +func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return nil + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + if bs == nil { + return d.decAppendIndefiniteBytes(nil) + } + return d.decAppendIndefiniteBytes(bs[:0]) + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *cborDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if !d.bdRead { + d.readNextBd() + } + u := d.decUint() + d.bdRead = false + realxtag = u + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + d.d.decode(&re.Value) + } else if xtag != realxtag { + d.d.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag) + return + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.bd { + case cborBdNil: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case cborBdIndefiniteBytes: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case cborBdIndefiniteString: + n.v = valueTypeString + n.s = d.DecodeString() + case cborBdIndefiniteArray: + n.v = valueTypeArray + decodeFurther = true + case cborBdIndefiniteMap: + n.v = valueTypeMap + decodeFurther = true + default: + switch { + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case d.bd >= cborBaseBytes && d.bd < cborBaseString: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case d.bd >= cborBaseString && d.bd < cborBaseArray: + n.v = valueTypeString + n.s = d.DecodeString() + case d.bd >= cborBaseArray && d.bd < cborBaseMap: + n.v = valueTypeArray + decodeFurther = true + case d.bd >= cborBaseMap && d.bd < cborBaseTag: + n.v = valueTypeMap + decodeFurther = true + case d.bd >= cborBaseTag && d.bd < cborBaseSimple: + n.v = valueTypeExt + n.u = d.decUint() + n.l = nil + // d.bdRead = false + // d.d.decode(&re.Value) // handled by decode itself. + // decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + return + } + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +// ------------------------- + +// CborHandle is a Handle for the CBOR encoding format, +// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . +// +// CBOR is comprehensively supported, including support for: +// - indefinite-length arrays/maps/bytes/strings +// - (extension) tags in range 0..0xffff (0 .. 65535) +// - half, single and double-precision floats +// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) +// - nil, true, false, ... +// - arrays and maps, bytes and text strings +// +// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. +// Users can implement them as needed (using SetExt), including spec-documented ones: +// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. +// +// To encode with indefinite lengths (streaming), users will use +// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. 
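+// (Decoding indefinite-length items needs no special handling: cborDecDriver above
+// detects the indefinite-length descriptors itself, e.g. ReadMapStart/ReadArrayStart
+// return -1 and decAppendIndefiniteBytes accumulates chunks until the break byte.)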
+// +// For example, to encode "one-byte" as an indefinite length string: +// var buf bytes.Buffer +// e := NewEncoder(&buf, new(CborHandle)) +// buf.WriteByte(CborStreamString) +// e.MustEncode("one-") +// e.MustEncode("byte") +// buf.WriteByte(CborStreamBreak) +// encodedBytes := buf.Bytes() +// var vv interface{} +// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv) +// // Now, vv contains the same string "one-byte" +// +type CborHandle struct { + binaryEncodingType + BasicHandle +} + +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *CborHandle) newEncDriver(e *Encoder) encDriver { + return &cborEncDriver{e: e, w: e.w, h: h} +} + +func (h *CborHandle) newDecDriver(d *Decoder) decDriver { + return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *cborEncDriver) reset() { + e.w = e.e.w +} + +func (d *cborDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*cborDecDriver)(nil) +var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go new file mode 100644 index 0000000..cbe2df4 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode.go @@ -0,0 +1,2067 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "time" +) + +// Some tagging information for error messages. +const ( + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +var ( + onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") + cannotDecodeIntoNilErr = errors.New("cannot decode into nil") +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + unreadn1() + + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. + readx(n int) []byte + readb([]byte) + readn1() uint8 + readn1eof() (v uint8, eof bool) + numread() int // number of bytes read + track() + stopTrack() []byte +} + +type decReaderByteScanner interface { + io.Reader + io.ByteScanner +} + +type decDriver interface { + // this will check if the next token is a break. + CheckBreak() bool + TryDecodeAsNil() bool + // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. + ContainerType() (vt valueType) + IsBuiltinType(rt uintptr) bool + DecodeBuiltin(rt uintptr, v interface{}) + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. + // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the decNaked.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. 
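+	// (decNaked, defined further below in this file, is that out-of-band holder;
+	// its field v records the valueType that was signaled.)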
+ // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. + DecodeNaked() + DecodeInt(bitsize uint8) (i int64) + DecodeUint(bitsize uint8) (ui uint64) + DecodeFloat(chkOverflow32 bool) (f float64) + DecodeBool() (b bool) + // DecodeString can also decode symbols. + // It looks redundant as DecodeBytes is available. + // However, some codecs (e.g. binc) support symbols and can + // return a pre-stored string value, meaning that it can bypass + // the cost of []byte->string conversion. + DecodeString() (s string) + + // DecodeBytes may be called directly, without going through reflection. + // Consequently, it must be designed to handle possible nil. + DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) + + // decodeExt will decode into a *RawExt or into an extension. + DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) + // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + ReadMapStart() int + ReadArrayStart() int + + reset() + uncacheRead() +} + +type decNoSeparator struct { +} + +func (_ decNoSeparator) ReadEnd() {} + +// func (_ decNoSeparator) uncacheRead() {} + +type DecodeOptions struct { + // MapType specifies type to use during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} + MapType reflect.Type + + // SliceType specifies type to use during schema-less decoding of an array in the stream. + // If nil, we use []interface{} + SliceType reflect.Type + + // MaxInitLen defines the maxinum initial length that we "make" a collection (string, slice, map, chan). + // If 0 or negative, we default to a sensible value based on the size of an element in the collection. + // + // For example, when decoding, a stream may say that it has 2^64 elements. + // We should not auto-matically provision a slice of that length, to prevent Out-Of-Memory crash. + // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. + MaxInitLen int + + // If ErrorIfNoField, return an error when decoding a map + // from a codec stream into a struct, and no matching struct field is found. + ErrorIfNoField bool + + // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. + // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. + // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // InterfaceReset controls how we decode into an interface. 
+ // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // So everything will not be interned. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool +} + +// ------------------------------------ + +// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods +// of io.Reader, io.ByteScanner. +type ioDecByteScanner struct { + r io.Reader + l byte // last byte + ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread + b [1]byte // tiny buffer for reading single bytes +} + +func (z *ioDecByteScanner) Read(p []byte) (n int, err error) { + var firstByte bool + if z.ls == 1 { + z.ls = 2 + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = 2 + } + if firstByte { + n++ + } + return +} + +func (z *ioDecByteScanner) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecByteScanner) UnreadByte() (err error) { + x := z.ls + if x == 0 { + err = errors.New("cannot unread - nothing has been read") + } else if x == 1 { + err = errors.New("cannot unread - last byte has not been read") + } else if x == 2 { + z.ls = 1 + } + return +} + +// ioDecReader is a decReader that reads off an io.Reader +type ioDecReader struct { + br decReaderByteScanner + // temp byte array re-used internally for efficiency during read. + // shares buffer with Decoder, so we keep size of struct within 8 words. + x *[scratchByteArrayLen]byte + bs ioDecByteScanner + n int // num read + tr []byte // tracking bytes read + trb bool +} + +func (z *ioDecReader) numread() int { + return z.n +} + +func (z *ioDecReader) readx(n int) (bs []byte) { + if n <= 0 { + return + } + if n < len(z.x) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := io.ReadAtLeast(z.br, bs, n); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := io.ReadAtLeast(z.br, bs, len(bs)) + z.n += n + if err != nil { + panic(err) + } + if z.trb { + z.tr = append(z.tr, bs...) 
+ } +} + +func (z *ioDecReader) readn1() (b uint8) { + b, err := z.br.ReadByte() + if err != nil { + panic(err) + } + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return b +} + +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else if err == io.EOF { + eof = true + } else { + panic(err) + } + return +} + +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } +} + +func (z *ioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------ + +var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available + t int // track start +} + +func (z *bytesDecReader) reset(in []byte) { + z.b = in + z.a = len(in) + z.c = 0 + z.t = 0 +} + +func (z *bytesDecReader) numread() int { + return z.c +} + +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(bytesDecReaderCannotUnreadErr) + } + z.c-- + z.a++ + return +} + +func (z *bytesDecReader) readx(n int) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. + // However, we do it only once, and it's better than reslicing both z.b and return value. + + if n <= 0 { + } else if z.a == 0 { + panic(io.EOF) + } else if n > z.a { + panic(io.ErrUnexpectedEOF) + } else { + c0 := z.c + z.c = c0 + n + z.a = z.a - n + bs = z.b[c0:z.c] + } + return +} + +func (z *bytesDecReader) readn1() (v uint8) { + if z.a == 0 { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + z.a-- + return +} + +func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { + if z.a == 0 { + eof = true + return + } + v = z.b[z.c] + z.c++ + z.a-- + return +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(len(bs))) +} + +func (z *bytesDecReader) track() { + z.t = z.c +} + +func (z *bytesDecReader) stopTrack() (bs []byte) { + return z.b[z.t:z.c] +} + +// ------------------------------------ + +type decFnInfo struct { + d *Decoder + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType +} + +// ---------------------------------------- + +type decFn struct { + i decFnInfo + f func(*decFnInfo, reflect.Value) +} + +func (f *decFnInfo) builtin(rv reflect.Value) { + f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +} + +func (f *decFnInfo) rawExt(rv reflect.Value) { + f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) +} + +func (f *decFnInfo) raw(rv reflect.Value) { + rv.SetBytes(f.d.raw()) +} + +func (f *decFnInfo) ext(rv reflect.Value) { + f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) +} + +func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { + if indir == -1 { + v = rv.Addr().Interface() + } else if indir == 0 { + v = rv.Interface() + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + v = rv.Interface() + } + return +} + +func (f *decFnInfo) selferUnmarshal(rv reflect.Value) { + f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d) +} + +func (f 
*decFnInfo) binaryUnmarshal(rv reflect.Value) { + bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) + xbs := f.d.d.DecodeBytes(nil, false, true) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) textUnmarshal(rv reflect.Value) { + tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + if fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) { + tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) + // bs := f.d.d.DecodeBytes(f.d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) kErr(rv reflect.Value) { + f.d.errorf("no decoding function defined for kind %v", rv.Kind()) +} + +func (f *decFnInfo) kString(rv reflect.Value) { + rv.SetString(f.d.d.DecodeString()) +} + +func (f *decFnInfo) kBool(rv reflect.Value) { + rv.SetBool(f.d.d.DecodeBool()) +} + +func (f *decFnInfo) kInt(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(intBitsize)) +} + +func (f *decFnInfo) kInt64(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(64)) +} + +func (f *decFnInfo) kInt32(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(32)) +} + +func (f *decFnInfo) kInt8(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(8)) +} + +func (f *decFnInfo) kInt16(rv reflect.Value) { + rv.SetInt(f.d.d.DecodeInt(16)) +} + +func (f *decFnInfo) kFloat32(rv reflect.Value) { + rv.SetFloat(f.d.d.DecodeFloat(true)) +} + +func (f *decFnInfo) kFloat64(rv reflect.Value) { + rv.SetFloat(f.d.d.DecodeFloat(false)) +} + +func (f *decFnInfo) kUint8(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(8)) +} + +func (f *decFnInfo) kUint64(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(64)) +} + +func (f *decFnInfo) kUint(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUintptr(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUint32(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(32)) +} + +func (f *decFnInfo) kUint16(rv reflect.Value) { + rv.SetUint(f.d.d.DecodeUint(16)) +} + +// func (f *decFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called") +// if rv.IsNil() { +// rv.Set(reflect.New(rv.Type().Elem())) +// } +// f.d.decodeValue(rv.Elem()) +// } + +// var kIntfCtr uint64 + +func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { + // nil interface: + // use some hieristics to decode it appropriately + // based on the detected next value in the stream. + d := f.d + d.d.DecodeNaked() + n := &d.n + if n.v == valueTypeNil { + return + } + // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader). 
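+	// (That restriction is enforced just below via the cached f.ti.numMeth.)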
+ // if num := f.ti.rt.NumMethod(); num > 0 { + if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return + } + // var useRvn bool + switch n.v { + case valueTypeMap: + // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp { + // } else if d.h.MapType == mapStrIntfTyp { // for json performance + // } + if d.mtid == 0 || d.mtid == mapIntfIntfTypId { + l := len(n.ms) + n.ms = append(n.ms, nil) + var v2 interface{} = &n.ms[l] + d.decode(v2) + rvn = reflect.ValueOf(v2).Elem() + n.ms = n.ms[:l] + } else if d.mtid == mapStrIntfTypId { // for json performance + l := len(n.ns) + n.ns = append(n.ns, nil) + var v2 interface{} = &n.ns[l] + d.decode(v2) + rvn = reflect.ValueOf(v2).Elem() + n.ns = n.ns[:l] + } else { + rvn = reflect.New(d.h.MapType).Elem() + d.decodeValue(rvn, nil) + } + case valueTypeArray: + // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp { + if d.stid == 0 || d.stid == intfSliceTypId { + l := len(n.ss) + n.ss = append(n.ss, nil) + var v2 interface{} = &n.ss[l] + d.decode(v2) + n.ss = n.ss[:l] + rvn = reflect.ValueOf(v2).Elem() + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn = reflectArrayOf(rvn) + } + } else { + rvn = reflect.New(d.h.SliceType).Elem() + d.decodeValue(rvn, nil) + } + case valueTypeExt: + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + l := len(n.is) + n.is = append(n.is, nil) + v2 := &n.is[l] + d.decode(v2) + v = *v2 + n.is = n.is[:l] + } + bfn := d.h.getExtForTag(tag) + if bfn == nil { + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + rvn = reflect.ValueOf(re) + } else { + rvnA := reflect.New(bfn.rt) + rvn = rvnA.Elem() + if bytes != nil { + bfn.ext.ReadExt(rvnA.Interface(), bytes) + } else { + bfn.ext.UpdateExt(rvnA.Interface(), v) + } + } + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = reflect.ValueOf(&n.i).Elem() + case valueTypeUint: + rvn = reflect.ValueOf(&n.u).Elem() + case valueTypeFloat: + rvn = reflect.ValueOf(&n.f).Elem() + case valueTypeBool: + rvn = reflect.ValueOf(&n.b).Elem() + case valueTypeString, valueTypeSymbol: + rvn = reflect.ValueOf(&n.s).Elem() + case valueTypeBytes: + rvn = reflect.ValueOf(&n.l).Elem() + case valueTypeTimestamp: + rvn = reflect.ValueOf(&n.t).Elem() + default: + panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) + } + return +} + +func (f *decFnInfo) kInterface(rv reflect.Value) { + // debugf("\t===> kInterface") + + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + var rvn reflect.Value + if rv.IsNil() { + rvn = f.kInterfaceNaked() + if rvn.IsValid() { + rv.Set(rvn) + } + } else if f.d.h.InterfaceReset { + rvn = f.kInterfaceNaked() + if rvn.IsValid() { + rv.Set(rvn) + } else { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + } else { + rvn = rv.Elem() + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we have to set the reflect.Value directly. + // if underlying type is settable (e.g. ptr or interface), + // we just decode into it. + // Else we create a settable value, decode into it, and set on the interface. 
+ if rvn.CanSet() { + f.d.decodeValue(rvn, nil) + } else { + rvn2 := reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + f.d.decodeValue(rvn2, nil) + rv.Set(rvn2) + } + } +} + +func (f *decFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + d := f.d + dd := d.d + cr := d.cr + ctyp := dd.ContainerType() + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + tisfi := fti.sfi + hasLen := containerLen >= 0 + if hasLen { + for j := 0; j < containerLen; j++ { + // rvkencname := dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) + // rvksi := ti.getForEncName(rvkencname) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + } + } else { + for j := 0; !dd.CheckBreak(); j++ { + // rvkencname := dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + rvkencname := stringView(dd.DecodeBytes(f.d.b[:], true, true)) + // rvksi := ti.getForEncName(rvkencname) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + return + } + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. + hasLen := containerLen >= 0 + for j, si := range fti.sfip { + if hasLen { + if j == containerLen { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(si.field(rv, true), nil) + } + } + if containerLen > len(fti.sfip) { + // read remaining values and throw away + for j := len(fti.sfip); j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + d.structFieldNotFound(j, "") + } + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + } else { + f.d.error(onlyMapOrArrayCanDecodeIntoStructErr) + return + } +} + +func (f *decFnInfo) kSlice(rv reflect.Value) { + // A slice can be set from a map or array in stream. + // This way, the order can be kept (as order is lost with map). 
+ ti := f.ti + d := f.d + dd := d.d + rtelem0 := ti.rt.Elem() + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, false, true) + ch := rv.Interface().(chan<- byte) + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false, false) + if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else { + copy(rvbs, bs2) + } + } + } + return + } + + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } + } + slh.End() + return + } + + rtelem := rtelem0 + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + fn := d.getDecFn(rtelem, true, true) + + var rv0, rv9 reflect.Value + rv0 = rv + rvChanged := false + + // for j := 0; j < containerLenS; j++ { + var rvlen int + if containerLenS > 0 { // hasLen + if f.seq == seqTypeChan { + if rv.IsNil() { + rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) + rv.Set(reflect.MakeChan(ti.rt, rvlen)) + } + // handle chan specially: + for j := 0; j < containerLenS; j++ { + rv9 = reflect.New(rtelem0).Elem() + slh.ElemContainerState(j) + d.decodeValue(rv9, fn) + rv.Send(rv9) + } + } else { // slice or array + var truncated bool // says len of sequence is not same as expected number of elements + numToRead := containerLenS // if truncated, reset numToRead + + rvcap := rv.Cap() + rvlen = rv.Len() + if containerLenS > rvcap { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, containerLenS) + } else { + oldRvlenGtZero := rvlen > 0 + rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) + if truncated { + if rvlen <= rvcap { + rv.SetLen(rvlen) + } else { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } + } else { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) + } + rvcap = rvlen + } + numToRead = rvlen + } else if containerLenS != rvlen { + if f.seq == seqTypeSlice { + rv.SetLen(containerLenS) + rvlen = containerLenS + } + } + j := 0 + // we read up to the numToRead + for ; j < numToRead; j++ { + slh.ElemContainerState(j) + d.decodeValue(rv.Index(j), fn) + } + + // if slice, expand and read up to containerLenS (or EOF) iff truncated + // if array, swallow all the rest. 
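+			// (d.swallow() skips a stream value entirely, without decoding it into anything.)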
+ + if f.seq == seqTypeArray { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } else if truncated { // slice was truncated, as chan NOT in this block + for ; j < containerLenS; j++ { + rv = expandSliceValue(rv, 1) + rv9 = rv.Index(j) + if resetSliceElemToZeroValue { + rv9.Set(reflect.Zero(rtelem0)) + } + slh.ElemContainerState(j) + d.decodeValue(rv9, fn) + } + } + } + } else { + rvlen = rv.Len() + j := 0 + for ; !dd.CheckBreak(); j++ { + if f.seq == seqTypeChan { + slh.ElemContainerState(j) + rv9 = reflect.New(rtelem0).Elem() + d.decodeValue(rv9, fn) + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs + rv = expandSliceValue(rv, 1) + rv9 = rv.Index(j) + // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0)) + if resetSliceElemToZeroValue { + rv9.Set(reflect.Zero(rtelem0)) + } + rvlen++ + rvChanged = true + } + } else { // slice or array + rv9 = rv.Index(j) + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { // seqTypeSlice + d.decodeValue(rv9, fn) + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + rv.SetLen(j) + } else if j == 0 && rv.IsNil() { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } + } + } + slh.End() + + if rvChanged { + rv0.Set(rv) + } +} + +func (f *decFnInfo) kArray(rv reflect.Value) { + // f.d.decodeValue(rv.Slice(0, rv.Len())) + f.kSlice(rv.Slice(0, rv.Len())) +} + +func (f *decFnInfo) kMap(rv reflect.Value) { + d := f.d + dd := d.d + containerLen := dd.ReadMapStart() + cr := d.cr + ti := f.ti + if rv.IsNil() { + rv.Set(reflect.MakeMap(ti.rt)) + } + + if containerLen == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + + ktype, vtype := ti.rt.Key(), ti.rt.Elem() + ktypeId := reflect.ValueOf(ktype).Pointer() + vtypeKind := vtype.Kind() + var keyFn, valFn *decFn + var xtyp reflect.Type + for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { + } + keyFn = d.getDecFn(xtyp, true, true) + for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { + } + valFn = d.getDecFn(xtyp, true, true) + var mapGet, mapSet bool + if !f.d.h.MapValueReset { + // if pointer, mapGet = true + // if interface, mapGet = true if !DecodeNakedAlways (else false) + // if builtin, mapGet = false + // else mapGet = true + if vtypeKind == reflect.Ptr { + mapGet = true + } else if vtypeKind == reflect.Interface { + if !f.d.h.InterfaceReset { + mapGet = true + } + } else if !isImmutableKind(vtypeKind) { + mapGet = true + } + } + + var rvk, rvv, rvz reflect.Value + + // for j := 0; j < containerLen; j++ { + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + rvk = reflect.New(ktype).Elem() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.decodeValue(rvk, keyFn) + + // special case if a byte array. 
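+			// (A []byte held in an interface{} key is converted via d.string to a real string,
+			// since a byte slice cannot be used as a map key; d.string also interns it when
+			// InternString is enabled.)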
+ if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk.Bytes())) + } + } + mapSet = true // set to false if u do a get, and its a pointer, and exists + if mapGet { + rvv = rv.MapIndex(rvk) + if rvv.IsValid() { + if vtypeKind == reflect.Ptr { + mapSet = false + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.decodeValue(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + } + } else { + for j := 0; !dd.CheckBreak(); j++ { + rvk = reflect.New(ktype).Elem() + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.decodeValue(rvk, keyFn) + + // special case if a byte array. + if ktypeId == intfTypId { + rvk = rvk.Elem() + if rvk.Type() == uint8SliceTyp { + rvk = reflect.ValueOf(d.string(rvk.Bytes())) + } + } + mapSet = true // set to false if u do a get, and its a pointer, and exists + if mapGet { + rvv = rv.MapIndex(rvk) + if rvv.IsValid() { + if vtypeKind == reflect.Ptr { + mapSet = false + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + } else { + if rvz.IsValid() { + rvz.Set(reflect.Zero(vtype)) + } else { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.decodeValue(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +type decRtidFn struct { + rtid uintptr + fn decFn +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accommodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + u uint64 + i int64 + f float64 + l []byte + s string + t time.Time + b bool + v valueType + + // stacks for reducing allocation + is []interface{} + ms []map[interface{}]interface{} + ns []map[string]interface{} + ss [][]interface{} + // rs []RawExt + + // keep arrays at the bottom? Chance is that they are not used much. + ia [4]interface{} + ma [4]map[interface{}]interface{} + na [4]map[string]interface{} + sa [4][]interface{} + // ra [2]RawExt +} + +func (n *decNaked) reset() { + if n.ss != nil { + n.ss = n.ss[:0] + } + if n.is != nil { + n.is = n.is[:0] + } + if n.ms != nil { + n.ms = n.ms[:0] + } + if n.ns != nil { + n.ns = n.ns[:0] + } +} + +// A Decoder reads and decodes an object from an input stream in the codec format. 
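+//
+// Create one with NewDecoder (for an io.Reader) or NewDecoderBytes (for a []byte),
+// then call Decode or MustDecode; see the Decode documentation below for sample usages.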
+type Decoder struct { + // hopefully, reduce derefencing cost by laying the decReader inside the Decoder. + // Try to put things that go together to fit within a cache line (8 words). + + d decDriver + // NOTE: Decoder shouldn't call it's read methods, + // as the handler MAY need to do some coordination. + r decReader + // sa [initCollectionCap]decRtidFn + h *BasicHandle + hh Handle + + be bool // is binary encoding + bytes bool // is bytes reader + js bool // is json handle + + rb bytesDecReader + ri ioDecReader + cr containerStateRecv + + s []decRtidFn + f map[uintptr]*decFn + + // _ uintptr // for alignment purposes, so next one starts from a cache line + + // cache the mapTypeId and sliceTypeId for faster comparisons + mtid uintptr + stid uintptr + + n decNaked + b [scratchByteArrayLen]byte + is map[string]string // used for interning strings +} + +// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. +// +// For efficiency, Users are encouraged to pass in a memory buffered reader +// (eg bufio.Reader, bytes.Buffer). +func NewDecoder(r io.Reader, h Handle) *Decoder { + d := newDecoder(h) + d.Reset(r) + return d +} + +// NewDecoderBytes returns a Decoder which efficiently decodes directly +// from a byte slice with zero copying. +func NewDecoderBytes(in []byte, h Handle) *Decoder { + d := newDecoder(h) + d.ResetBytes(in) + return d +} + +func newDecoder(h Handle) *Decoder { + d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} + n := &d.n + // n.rs = n.ra[:0] + n.ms = n.ma[:0] + n.is = n.ia[:0] + n.ns = n.na[:0] + n.ss = n.sa[:0] + _, d.js = h.(*JsonHandle) + if d.h.InternString { + d.is = make(map[string]string, 32) + } + d.d = h.newDecDriver(d) + d.cr, _ = d.d.(containerStateRecv) + // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri}) + return d +} + +func (d *Decoder) resetCommon() { + d.n.reset() + d.d.reset() + // reset all things which were cached from the Handle, + // but could be changed. + d.mtid, d.stid = 0, 0 + if d.h.MapType != nil { + d.mtid = reflect.ValueOf(d.h.MapType).Pointer() + } + if d.h.SliceType != nil { + d.stid = reflect.ValueOf(d.h.SliceType).Pointer() + } +} + +func (d *Decoder) Reset(r io.Reader) { + d.ri.x = &d.b + // d.s = d.sa[:0] + d.ri.bs.r = r + var ok bool + d.ri.br, ok = r.(decReaderByteScanner) + if !ok { + d.ri.br = &d.ri.bs + } + d.r = &d.ri + d.resetCommon() +} + +func (d *Decoder) ResetBytes(in []byte) { + // d.s = d.sa[:0] + d.bytes = true + d.rb.reset(in) + d.r = &d.rb + d.resetCommon() +} + +// func (d *Decoder) sendContainerState(c containerState) { +// if d.cr != nil { +// d.cr.sendContainerState(c) +// } +// } + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. 
+// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). +// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErr(&err) + d.decode(v) + return +} + +// this is not a smart swallow, as it allocates objects and does unnecessary work. +func (d *Decoder) swallowViaHammer() { + var blank interface{} + d.decodeValue(reflect.ValueOf(&blank).Elem(), nil) +} + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + cr := d.cr + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + clenGtEqualZero := containerLen >= 0 + for j := 0; ; j++ { + if clenGtEqualZero { + if j >= containerLen { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerMapKey) + } + d.swallow() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + d.swallow() + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + case valueTypeArray: + containerLenS := dd.ReadArrayStart() + clenGtEqualZero := containerLenS >= 0 + for j := 0; ; j++ { + if clenGtEqualZero { + if j >= containerLenS { + break + } + } else if dd.CheckBreak() { + break + } + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + d.swallow() + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + case valueTypeBytes: + dd.DecodeBytes(d.b[:], false, true) + case valueTypeString: + dd.DecodeBytes(d.b[:], true, true) + // dd.DecodeStringAsBytes(d.b[:]) + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. + dd.DecodeNaked() + if n := &d.n; n.v == valueTypeExt && n.l == nil { + l := len(n.is) + n.is = append(n.is, nil) + v2 := &n.is[l] + d.decode(v2) + n.is = n.is[:l] + } + } +} + +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. 
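+// (Unlike Decode, it does not recover the internal panic into a returned error.)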
+func (d *Decoder) MustDecode(v interface{}) { + d.decode(v) +} + +func (d *Decoder) decode(iv interface{}) { + // if ics, ok := iv.(Selfer); ok { + // ics.CodecDecodeSelf(d) + // return + // } + + if d.d.TryDecodeAsNil() { + switch v := iv.(type) { + case nil: + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case reflect.Value: + if v.Kind() != reflect.Ptr || v.IsNil() { + d.errNotValidPtrValue(v) + } + // d.chkPtrValue(v) + v = v.Elem() + if v.IsValid() { + v.Set(reflect.Zero(v.Type())) + } + default: + rv := reflect.ValueOf(iv) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + d.errNotValidPtrValue(rv) + } + // d.chkPtrValue(rv) + rv = rv.Elem() + if rv.IsValid() { + rv.Set(reflect.Zero(rv.Type())) + } + } + return + } + + switch v := iv.(type) { + case nil: + d.error(cannotDecodeIntoNilErr) + return + + case Selfer: + v.CodecDecodeSelf(d) + + case reflect.Value: + if v.Kind() != reflect.Ptr || v.IsNil() { + d.errNotValidPtrValue(v) + } + // d.chkPtrValue(v) + d.decodeValueNotNil(v.Elem(), nil) + + case *string: + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(d.d.DecodeInt(intBitsize)) + case *int8: + *v = int8(d.d.DecodeInt(8)) + case *int16: + *v = int16(d.d.DecodeInt(16)) + case *int32: + *v = int32(d.d.DecodeInt(32)) + case *int64: + *v = d.d.DecodeInt(64) + case *uint: + *v = uint(d.d.DecodeUint(uintBitsize)) + case *uint8: + *v = uint8(d.d.DecodeUint(8)) + case *uint16: + *v = uint16(d.d.DecodeUint(16)) + case *uint32: + *v = uint32(d.d.DecodeUint(32)) + case *uint64: + *v = d.d.DecodeUint(64) + case *float32: + *v = float32(d.d.DecodeFloat(true)) + case *float64: + *v = d.d.DecodeFloat(false) + case *[]uint8: + *v = d.d.DecodeBytes(*v, false, false) + + case *Raw: + *v = d.raw() + + case *interface{}: + d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) + + default: + if !fastpathDecodeTypeSwitch(iv, d) { + d.decodeI(iv, true, false, false, false) + } + } +} + +func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) { + if tryNil && d.d.TryDecodeAsNil() { + // No need to check if a ptr, recursively, to determine + // whether to set value to nil. + // Just always set value to its zero type. + if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid + rv.Set(reflect.Zero(rv.Type())) + } + return + } + + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. 
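+	// (Nil pointers along the chain are allocated as we dereference.)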
+ for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + return rv, true +} + +func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) { + rv := reflect.ValueOf(iv) + if checkPtr { + if rv.Kind() != reflect.Ptr || rv.IsNil() { + d.errNotValidPtrValue(rv) + } + // d.chkPtrValue(rv) + } + rv, proceed := d.preDecodeValue(rv, tryNil) + if proceed { + fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer) + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) { + if rv, proceed := d.preDecodeValue(rv, true); proceed { + if fn == nil { + fn = d.getDecFn(rv.Type(), true, true) + } + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) { + if rv, proceed := d.preDecodeValue(rv, false); proceed { + if fn == nil { + fn = d.getDecFn(rv.Type(), true, true) + } + fn.f(&fn.i, rv) + } +} + +func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) { + rtid := reflect.ValueOf(rt).Pointer() + + // retrieve or register a focus'ed function for this type + // to eliminate need to do the retrieval multiple times + + // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } + var ok bool + if useMapForCodecCache { + fn, ok = d.f[rtid] + } else { + for i := range d.s { + v := &(d.s[i]) + if v.rtid == rtid { + fn, ok = &(v.fn), true + break + } + } + } + if ok { + return + } + + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]*decFn, initCollectionCap) + } + fn = new(decFn) + d.f[rtid] = fn + } else { + if d.s == nil { + d.s = make([]decRtidFn, 0, initCollectionCap) + } + d.s = append(d.s, decRtidFn{rtid: rtid}) + fn = &(d.s[len(d.s)-1]).fn + } + + // debugf("\tCreating new dec fn for type: %v\n", rt) + ti := d.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.d = d + fi.ti = ti + + // An extension can be registered for any type, regardless of the Kind + // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. + // + // We can't check if it's an extension byte here first, because the user may have + // registered a pointer or non-pointer type, meaning we may have to recurse first + // before matching a mapped type, even though the extension byte is already detected. + // + // NOTE: if decoding into a nil interface{}, we return a non-nil + // value except even if the container registers a length of 0. 
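+	// Resolution order below: Selfer, RawExt/Raw, driver builtin, registered extension,
+	// Binary/JSON/Text unmarshalers, fast-path map/slice functions, then by reflect.Kind.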
+ if checkCodecSelfer && ti.cs { + fn.f = (*decFnInfo).selferUnmarshal + } else if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if rtid == rawTypId { + fn.f = (*decFnInfo).raw + } else if d.d.IsBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfFn := d.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.f = (*decFnInfo).ext + } else if supportMarshalInterfaces && d.be && ti.bunm { + fn.f = (*decFnInfo).binaryUnmarshal + } else if supportMarshalInterfaces && !d.be && d.js && ti.junm { + //If JSON, we should check JSONUnmarshal before textUnmarshal + fn.f = (*decFnInfo).jsonUnmarshal + } else if supportMarshalInterfaces && !d.be && ti.tunm { + fn.f = (*decFnInfo).textUnmarshal + } else { + rk := rt.Kind() + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { + if idx := fastpathAV.index(rtid); idx != -1 { + fn.f = fastpathAV[idx].decfn + } + } else { + // use mapping for underlying type if there + ok = false + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := reflect.ValueOf(rtu).Pointer() + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].decfn + xrt := fastpathAV[idx].rt + fn.f = func(xf *decFnInfo, xrv reflect.Value) { + // xfnf(xf, xrv.Convert(xrt)) + xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem()) + } + } + } + } + if fn.f == nil { + switch rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Uintptr: + fn.f = (*decFnInfo).kUintptr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Chan: + fi.seq = seqTypeChan + fn.f = (*decFnInfo).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } + } + } + + return +} + +func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. 
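+	// (A stringView may alias the decoder's scratch buffer, which is reused on later reads.)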
+ if d.h.ErrorIfNoField { + if index >= 0 { + d.errorf("no matching struct field found when decoding stream array at index %v", index) + return + } else if rvkencname != "" { + d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) + return + } + } + d.swallow() +} + +func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { + if d.h.ErrorIfNoArrayExpand { + d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen) + } +} + +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + return + } + d.errNotValidPtrValue(rv) +} + +func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { + if !rv.IsValid() { + d.error(cannotDecodeIntoNilErr) + return + } + if !rv.CanInterface() { + d.errorf("cannot decode into a value without an interface: %v", rv) + return + } + rvi := rv.Interface() + d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) +} + +func (d *Decoder) error(err error) { + panic(err) +} + +func (d *Decoder) errorf(format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = d.r.numread() + copy(params2[1:], params) + err := fmt.Errorf("[pos %d]: "+format, params2...) + panic(err) +} + +func (d *Decoder) string(v []byte) (s string) { + if d.is != nil { + s, ok := d.is[string(v)] // no allocation here. + if !ok { + s = string(v) + d.is[s] = s + } + return s + } + return string(v) // don't return stringView, as we need a real string here. +} + +func (d *Decoder) intern(s string) { + if d.is != nil { + d.is[s] = s + } +} + +// nextValueBytes returns the next value in the stream as a set of bytes. +func (d *Decoder) nextValueBytes() []byte { + d.d.uncacheRead() + d.r.track() + d.swallow() + return d.r.stopTrack() +} + +func (d *Decoder) raw() []byte { + // ensure that this is not a view into the bytes + // i.e. make new copy always. + bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + +// -------------------------------------------------- + +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
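+// (For a map in the stream, decSliceHelperStart doubles the reported length and
+// ElemContainerState alternates between map-key and map-value container states.)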
+type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + if ctyp == valueTypeArray { + x.array = true + clen = dd.ReadArrayStart() + } else if ctyp == valueTypeMap { + clen = dd.ReadMapStart() * 2 + } else { + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) + } + // x.ct = ctyp + x.d = d + return +} + +func (x decSliceHelper) End() { + cr := x.d.cr + if cr == nil { + return + } + if x.array { + cr.sendContainerState(containerArrayEnd) + } else { + cr.sendContainerState(containerMapEnd) + } +} + +func (x decSliceHelper) ElemContainerState(index int) { + cr := x.d.cr + if cr == nil { + return + } + if x.array { + cr.sendContainerState(containerArrayElem) + } else { + if index%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } +} + +func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice + } + if len(bs) == clen { + bsOut = bs + r.readb(bsOut) + } else if cap(bs) >= clen { + bsOut = bs[:clen] + r.readb(bsOut) + } else { + // bsOut = make([]byte, clen) + len2, _ := decInferLen(clen, maxInitLen, 1) + bsOut = make([]byte, len2) + r.readb(bsOut) + for len2 < clen { + len3, _ := decInferLen(clen-len2, maxInitLen, 1) + // fmt.Printf(">>>>> TESTING: in loop: clen: %v, maxInitLen: %v, len2: %v, len3: %v\n", clen, maxInitLen, len2, len3) + bs3 := bsOut + bsOut = make([]byte, len2+len3) + copy(bsOut, bs3) + r.readb(bsOut[len2:]) + len2 += len3 + } + } + return +} + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } + } + return in +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. + // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + truncated = true + } else { + rvlen = clen + } + return + // if clen <= 0 { + // rvlen = 0 + // } else if maxlen > 0 && clen > maxlen { + // rvlen = maxlen + // truncated = true + // } else { + // rvlen = clen + // } + // return +} + +// // implement overall decReader wrapping both, for possible use inline: +// type decReaderT struct { +// bytes bool +// rb *bytesDecReader +// ri *ioDecReader +// } +// +// // implement *Decoder as a decReader. +// // Using decReaderT (defined just above) caused performance degradation +// // possibly because of constant copying the value, +// // and some value->interface conversion causing allocation. +// func (d *Decoder) unreadn1() { +// if d.bytes { +// d.rb.unreadn1() +// } else { +// d.ri.unreadn1() +// } +// } +// ... for other methods of decReader. 
+// Testing showed that performance improvement was negligible. diff --git a/vendor/github.com/ugorji/go/codec/decode_go.go b/vendor/github.com/ugorji/go/codec/decode_go.go new file mode 100644 index 0000000..ba289ce --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode_go.go @@ -0,0 +1,16 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { + rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + return +} diff --git a/vendor/github.com/ugorji/go/codec/decode_go14.go b/vendor/github.com/ugorji/go/codec/decode_go14.go new file mode 100644 index 0000000..50063bc --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode_go14.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { + panic("reflect.ArrayOf unsupported") +} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go new file mode 100644 index 0000000..268154d --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -0,0 +1,1462 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "fmt" + "io" + "reflect" + "sort" + "sync" +) + +const ( + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 +) + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracts writing to a byte array or to an io.Writer. 
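+//
+// [Editor's note] Hedged sketch, not part of the upstream source: a minimal
+// in-memory implementation of the encWriter interface declared just below.
+// The package's real implementations are ioEncWriter and bytesEncWriter,
+// further down in this file; the type name bufWriter here is hypothetical.
+//
+//	type bufWriter struct{ b []byte }
+//
+//	func (w *bufWriter) writeb(p []byte)     { w.b = append(w.b, p...) }
+//	func (w *bufWriter) writestr(s string)   { w.b = append(w.b, s...) }
+//	func (w *bufWriter) writen1(c byte)      { w.b = append(w.b, c) }
+//	func (w *bufWriter) writen2(c1, c2 byte) { w.b = append(w.b, c1, c2) }
+//	func (w *bufWriter) atEndOfEncode()      {}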
+type encWriter interface { + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + atEndOfEncode() +} + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + IsBuiltinType(rt uintptr) bool + EncodeBuiltin(rt uintptr, v interface{}) + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // encodeExtPreamble(xtag byte, length int) + EncodeRawExt(re *RawExt, e *Encoder) + EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) + EncodeArrayStart(length int) + EncodeMapStart(length int) + EncodeString(c charEncoding, v string) + EncodeSymbol(v string) + EncodeStringBytes(c charEncoding, v []byte) + //TODO + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) + + reset() +} + +type encDriverAsis interface { + EncodeAsis(v []byte) +} + +type encNoSeparator struct{} + +func (_ encNoSeparator) EncodeEnd() {} + +type ioEncWriterWriter interface { + WriteByte(c byte) error + WriteString(s string) (n int, err error) + Write(p []byte) (n int, err error) +} + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +type EncodeOptions struct { + // Encode a struct as an array, and not as a map + StructToArray bool + + // Canonical representation means that encoding a value will always result in the same + // sequence of bytes. + // + // This only affects maps, as the iteration order for maps is random. + // + // The implementation MAY use the natural sort order for the map keys if possible: + // + // - If there is a natural sort order (ie for number, bool, string or []byte keys), + // then the map keys are first sorted in natural order and then written + // with corresponding map values to the strema. + // - If there is no natural sort order, then the map keys will first be + // encoded into []byte, and then sorted, + // before writing the sorted keys and the corresponding map values to the stream. + // + Canonical bool + + // CheckCircularRef controls whether we check for circular references + // and error fast during an encode. + // + // If enabled, an error is received if a pointer to a struct + // references itself either directly or through one of its fields (iteratively). + // + // This is opt-in, as there may be a performance hit to checking circular references. + CheckCircularRef bool + + // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers + // when checking if a value is empty. + // + // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. + RecursiveEmptyCheck bool + + // Raw controls whether we encode Raw values. + // This is a "dangerous" option and must be explicitly set. + // If set, we blindly encode Raw values as-is, without checking + // if they are a correct representation of a value in that format. + // If unset, we error out. + Raw bool + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. 
+ // + // Sample values: + // AsSymbolNone + // AsSymbolAll + // AsSymbolMapStringKeys + // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag + AsSymbols AsSymbolFlag +} + +// --------------------------------------------- + +type simpleIoEncWriterWriter struct { + w io.Writer + bw io.ByteWriter + sw ioEncStringWriter + bs [1]byte +} + +func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { + if o.bw != nil { + return o.bw.WriteByte(c) + } + // _, err = o.w.Write([]byte{c}) + o.bs[0] = c + _, err = o.w.Write(o.bs[:]) + return +} + +func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { + if o.sw != nil { + return o.sw.WriteString(s) + } + // return o.w.Write([]byte(s)) + return o.w.Write(bytesView(s)) +} + +func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { + return o.w.Write(p) +} + +// ---------------------------------------- + +// ioEncWriter implements encWriter and can write to an io.Writer implementation +type ioEncWriter struct { + w ioEncWriterWriter + s simpleIoEncWriterWriter + // x [8]byte // temp byte array re-used internally for efficiency +} + +func (z *ioEncWriter) writeb(bs []byte) { + if len(bs) == 0 { + return + } + n, err := z.w.Write(bs) + if err != nil { + panic(err) + } + if n != len(bs) { + panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)) + } +} + +func (z *ioEncWriter) writestr(s string) { + n, err := z.w.WriteString(s) + if err != nil { + panic(err) + } + if n != len(s) { + panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.w.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) +} + +func (z *ioEncWriter) atEndOfEncode() {} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. +type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) == 0 { + return + } + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + if len(s) == 0 { + return + } + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + oc, a := z.growNoAlloc(1) + if a { + z.growAlloc(1, oc) + } + z.b[oc] = b1 +} + +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + oc, a := z.growNoAlloc(2) + if a { + z.growAlloc(2, oc) + } + z.b[oc+1] = b2 + z.b[oc] = b1 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +// have a growNoalloc(n int), which can be inlined. +// if allocation is needed, then call growAlloc(n int) + +func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { + oldcursor = z.c + z.c = z.c + n + if z.c > len(z.b) { + if z.c > cap(z.b) { + allocNeeded = true + } else { + z.b = z.b[:cap(z.b)] + } + } + return +} + +func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { + // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. 
+ // bytes.Buffer model (2*cap + n): much better + // bs := make([]byte, 2*cap(z.b)+n) + bs := make([]byte, growCap(cap(z.b), 1, n)) + copy(bs, z.b[:oldcursor]) + z.b = bs +} + +// --------------------------------------------- + +type encFnInfo struct { + e *Encoder + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType +} + +func (f *encFnInfo) builtin(rv reflect.Value) { + f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) +} + +func (f *encFnInfo) raw(rv reflect.Value) { + f.e.raw(rv.Interface().(Raw)) +} + +func (f *encFnInfo) rawExt(rv reflect.Value) { + // rev := rv.Interface().(RawExt) + // f.e.e.EncodeRawExt(&rev, f.e) + var re *RawExt + if rv.CanAddr() { + re = rv.Addr().Interface().(*RawExt) + } else { + rev := rv.Interface().(RawExt) + re = &rev + } + f.e.e.EncodeRawExt(re, f.e) +} + +func (f *encFnInfo) ext(rv reflect.Value) { + // if this is a struct|array and it was addressable, then pass the address directly (not the value) + if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { + rv = rv.Addr() + } + f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e) +} + +func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { + if indir == 0 { + v = rv.Interface() + } else if indir == -1 { + // If a non-pointer was passed to Encode(), then that value is not addressable. + // Take addr if addressable, else copy value to an addressable value. + if rv.CanAddr() { + v = rv.Addr().Interface() + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v) + } + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + f.e.e.EncodeNil() + return + } + rv = rv.Elem() + } + v = rv.Interface() + } + return v, true +} + +func (f *encFnInfo) selferMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { + v.(Selfer).CodecEncodeSelf(f.e) + } +} + +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { + bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) + } +} + +func (f *encFnInfo) textMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { + // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface()) + bs, fnerr := v.(encoding.TextMarshaler).MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) + } +} + +func (f *encFnInfo) jsonMarshal(rv reflect.Value) { + if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { + bs, fnerr := v.(jsonMarshaler).MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) + } +} + +func (f *encFnInfo) kBool(rv reflect.Value) { + f.e.e.EncodeBool(rv.Bool()) +} + +func (f *encFnInfo) kString(rv reflect.Value) { + f.e.e.EncodeString(c_UTF8, rv.String()) +} + +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.e.e.EncodeFloat64(rv.Float()) +} + +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.e.e.EncodeFloat32(float32(rv.Float())) +} + +func (f *encFnInfo) kInt(rv reflect.Value) { + f.e.e.EncodeInt(rv.Int()) +} + +func (f *encFnInfo) kUint(rv reflect.Value) { + f.e.e.EncodeUint(rv.Uint()) +} + +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.e.e.EncodeNil() +} + +func (f *encFnInfo) kErr(rv reflect.Value) { + f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + ti := f.ti + // array 
may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + e := f.e + if f.seq != seqTypeArray { + if rv.IsNil() { + e.e.EncodeNil() + return + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + e.e.EncodeStringBytes(c_RAW, rv.Bytes()) + return + } + } + cr := e.cr + rtelem := ti.rt.Elem() + l := rv.Len() + if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { + switch f.seq { + case seqTypeArray: + // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else + if rv.CanAddr() { + e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + // TODO: Test that reflect.Copy works instead of manual one-by-one + // for i := 0; i < l; i++ { + // bs[i] = byte(rv.Index(i).Uint()) + // } + e.e.EncodeStringBytes(c_RAW, bs) + } + case seqTypeSlice: + e.e.EncodeStringBytes(c_RAW, rv.Bytes()) + case seqTypeChan: + bs := e.b[:0] + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv.Interface().(<-chan byte) { + // bs = append(bs, b) + // } + ch := rv.Interface().(<-chan byte) + for i := 0; i < l; i++ { + bs = append(bs, <-ch) + } + e.e.EncodeStringBytes(c_RAW, bs) + } + return + } + + if ti.mbs { + if l%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", l) + return + } + e.e.EncodeMapStart(l / 2) + } else { + e.e.EncodeArrayStart(l) + } + + if l > 0 { + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + var fn *encFn + if rtelem.Kind() != reflect.Interface { + rtelemid := reflect.ValueOf(rtelem).Pointer() + fn = e.getEncFn(rtelemid, rtelem, true, true) + } + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + for j := 0; j < l; j++ { + if cr != nil { + if ti.mbs { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } else { + cr.sendContainerState(containerArrayElem) + } + } + if f.seq == seqTypeChan { + if rv2, ok2 := rv.Recv(); ok2 { + e.encodeValue(rv2, fn) + } else { + e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received. + } + } else { + e.encodeValue(rv.Index(j), fn) + } + } + } + + if cr != nil { + if ti.mbs { + cr.sendContainerState(containerMapEnd) + } else { + cr.sendContainerState(containerArrayEnd) + } + } +} + +func (f *encFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + e := f.e + cr := e.cr + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + newlen := len(fti.sfi) + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + pool, poolv, fkvs := encStructPoolGet(newlen) + + // if toMap, use the sorted array. 
If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + } + newlen = 0 + var kv stringRv + recur := e.h.RecursiveEmptyCheck + for _, si := range tisfi { + kv.r = si.field(rv, false) + if toMap { + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { + continue + } + kv.v = si.encName + } else { + // use the zero value. + // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } + } + } + fkvs[newlen] = kv + newlen++ + } + + // debugf(">>>> kStruct: newlen: %v", newlen) + // sep := !e.be + ee := e.e //don't dereference every time + + if toMap { + ee.EncodeMapStart(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(kv.r, nil) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + } else { + ee.EncodeArrayStart(newlen) + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encodeValue(kv.r, nil) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } + } + + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. + if pool != nil { + pool.Put(poolv) + } +} + +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") +// if rv.IsNil() { +// f.e.e.encodeNil() +// return +// } +// f.e.encodeValue(rv.Elem()) +// } + +// func (f *encFnInfo) kInterface(rv reflect.Value) { +// println("kInterface called") +// debug.PrintStack() +// if rv.IsNil() { +// f.e.e.EncodeNil() +// return +// } +// f.e.encodeValue(rv.Elem(), nil) +// } + +func (f *encFnInfo) kMap(rv reflect.Value) { + ee := f.e.e + if rv.IsNil() { + ee.EncodeNil() + return + } + + l := rv.Len() + ee.EncodeMapStart(l) + e := f.e + cr := e.cr + if l == 0 { + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return + } + var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
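+	//
+	// [Editor's note] Hedged usage sketch, not upstream code: when the handle's
+	// Canonical option (see EncodeOptions above) is set, kMapCanonical below
+	// sorts the keys before writing, so encoding the same map twice is expected
+	// to produce byte-identical output. A JsonHandle is assumed for illustration,
+	// on the assumption that it exposes EncodeOptions through BasicHandle.
+	//
+	//	var jh JsonHandle
+	//	jh.Canonical = true
+	//	m := map[string]int{"b": 2, "a": 1}
+	//	var b1, b2 []byte
+	//	NewEncoderBytes(&b1, &jh).MustEncode(m)
+	//	NewEncoderBytes(&b2, &jh).MustEncode(m)
+	//	// b1 and b2 should be equal, with keys written in sorted order.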
+ var keyFn, valFn *encFn + ti := f.ti + rtkey := ti.rt.Key() + rtval := ti.rt.Elem() + rtkeyid := reflect.ValueOf(rtkey).Pointer() + // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String + var keyTypeIsString = rtkeyid == stringTypId + if keyTypeIsString { + asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } else { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + rtkeyid = reflect.ValueOf(rtkey).Pointer() + keyFn = e.getEncFn(rtkeyid, rtkey, true, true) + } + } + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + rtvalid := reflect.ValueOf(rtval).Pointer() + valFn = e.getEncFn(rtvalid, rtval, true, true) + } + mks := rv.MapKeys() + // for j, lmks := 0, len(mks); j < lmks; j++ { + + if e.h.Canonical { + e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols) + } else { + for j := range mks { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if keyTypeIsString { + if asSymbols { + ee.EncodeSymbol(mks[j].String()) + } else { + ee.EncodeString(c_UTF8, mks[j].String()) + } + } else { + e.encodeValue(mks[j], keyFn) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mks[j]), valFn) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) { + ee := e.e + cr := e.cr + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. + + if rtkeyid == uint8SliceTypId { + mksv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bytes() + } + sort.Sort(bytesRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeStringBytes(c_RAW, mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + } else { + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(mksv[i].v) + } else { + ee.EncodeString(c_UTF8, mksv[i].v) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, 
len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(mksv[i].v)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(mksv[i].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + } + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + // fmt.Printf(">>>>> %s\n", mksv[l:]) + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(mksbv[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn) + } + } + } +} + +// -------------------------------------------------- + +// encFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type encFn struct { + i encFnInfo + f func(*encFnInfo, reflect.Value) +} + +// -------------------------------------------------- + +type encRtidFn struct { + rtid uintptr + fn encFn +} + +// An Encoder writes an object to an output stream in the codec format. +type Encoder struct { + // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder + e encDriver + // NOTE: Encoder shouldn't call it's write methods, + // as the handler MAY need to do some coordination. + w encWriter + s []encRtidFn + ci set + be bool // is binary encoding + js bool // is json handle + + wi ioEncWriter + wb bytesEncWriter + + h *BasicHandle + hh Handle + + cr containerStateRecv + as encDriverAsis + + f map[uintptr]*encFn + b [scratchByteArrayLen]byte +} + +// NewEncoder returns an Encoder for encoding into an io.Writer. +// +// For efficiency, Users are encouraged to pass in a memory buffered writer +// (eg bufio.Writer, bytes.Buffer). +func NewEncoder(w io.Writer, h Handle) *Encoder { + e := newEncoder(h) + e.Reset(w) + return e +} + +// NewEncoderBytes returns an encoder for encoding directly and efficiently +// into a byte slice, using zero-copying to temporary slices. +// +// It will potentially replace the output byte slice pointed to. +// After encoding, the out parameter contains the encoded contents. 
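+//
+// [Editor's note] Hedged example, not upstream documentation; it assumes a
+// JsonHandle, but the same pattern applies to any Handle in this package:
+//
+//	var h JsonHandle
+//	var out []byte
+//	enc := NewEncoderBytes(&out, &h)
+//	if err := enc.Encode(map[string]int{"n": 3}); err != nil {
+//		// handle the error
+//	}
+//	// out now holds the encoded form of the map (for the JSON handle, {"n":3}).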
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder { + e := newEncoder(h) + e.ResetBytes(out) + return e +} + +func newEncoder(h Handle) *Encoder { + e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} + _, e.js = h.(*JsonHandle) + e.e = h.newEncDriver(e) + e.as, _ = e.e.(encDriverAsis) + e.cr, _ = e.e.(containerStateRecv) + return e +} + +// Reset the Encoder with a new output stream. +// +// This accommodates using the state of the Encoder, +// where it has "cached" information about sub-engines. +func (e *Encoder) Reset(w io.Writer) { + ww, ok := w.(ioEncWriterWriter) + if ok { + e.wi.w = ww + } else { + sww := &e.wi.s + sww.w = w + sww.bw, _ = w.(io.ByteWriter) + sww.sw, _ = w.(ioEncStringWriter) + e.wi.w = sww + //ww = bufio.NewWriterSize(w, defEncByteBufSize) + } + e.w = &e.wi + e.e.reset() +} + +func (e *Encoder) ResetBytes(out *[]byte) { + in := *out + if in == nil { + in = make([]byte, defEncByteBufSize) + } + e.wb.b, e.wb.out, e.wb.c = in, out, 0 + e.w = &e.wb + e.e.reset() +} + +// func (e *Encoder) sendContainerState(c containerState) { +// if e.cr != nil { +// e.cr.sendContainerState(c) +// } +// } + +// Encode writes an object into a stream. +// +// Encoding can be configured via the struct tag for the fields. +// The "codec" key in struct field's tag value is the key name, +// followed by an optional comma and options. +// Note that the "json" key is used in the absence of the "codec" key. +// +// To set an option on all fields (e.g. omitempty on all fields), you +// can create a field called _struct, and set flags on it. +// +// Struct values "usually" encode as maps. Each exported struct field is encoded unless: +// - the field's tag is "-", OR +// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option. +// +// When encoding as a map, the first string in the tag (before the comma) +// is the map key string to use when encoding. +// +// However, struct values may encode as arrays. This happens when: +// - StructToArray Encode option is set, OR +// - the tag on the _struct field sets the "toarray" option +// Note that omitempty is ignored when encoding struct values as arrays, +// as an entry must be encoded for each field, to maintain its position. +// +// Values with types that implement MapBySlice are encoded as stream maps. +// +// The empty values (for omitempty option) are false, 0, any nil pointer +// or interface value, and any array, slice, map, or string of length zero. +// +// Anonymous fields are encoded inline except: +// - the struct tag specifies a replacement name (first value) +// - the field is of an interface type +// +// Examples: +// +// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below. +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// io.Reader //use key "Reader". +// MyStruct `codec:"my1" //use key "my1". +// MyStruct //inline it +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",toarray"` //encode struct as an array +// } +// +// The mode of encoding is based on the type of the value. 
When a value is seen: +// - If a Selfer, call its CodecEncodeSelf method +// - If an extension is registered for it, call that extension function +// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. +func (e *Encoder) Encode(v interface{}) (err error) { + defer panicToErr(&err) + e.encode(v) + e.w.atEndOfEncode() + return +} + +// MustEncode is like Encode, but panics if unable to Encode. +// This provides insight to the code location that triggered the error. +func (e *Encoder) MustEncode(v interface{}) { + e.encode(v) + e.w.atEndOfEncode() +} + +func (e *Encoder) encode(iv interface{}) { + // if ics, ok := iv.(Selfer); ok { + // ics.CodecEncodeSelf(e) + // return + // } + + switch v := iv.(type) { + case nil: + e.e.EncodeNil() + case Selfer: + v.CodecEncodeSelf(e) + case Raw: + e.raw(v) + case reflect.Value: + e.encodeValue(v, nil) + + case string: + e.e.EncodeString(c_UTF8, v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + + case []uint8: + e.e.EncodeStringBytes(c_RAW, v) + + case *string: + e.e.EncodeString(c_UTF8, *v) + case *bool: + e.e.EncodeBool(*v) + case *int: + e.e.EncodeInt(int64(*v)) + case *int8: + e.e.EncodeInt(int64(*v)) + case *int16: + e.e.EncodeInt(int64(*v)) + case *int32: + e.e.EncodeInt(int64(*v)) + case *int64: + e.e.EncodeInt(*v) + case *uint: + e.e.EncodeUint(uint64(*v)) + case *uint8: + e.e.EncodeUint(uint64(*v)) + case *uint16: + e.e.EncodeUint(uint64(*v)) + case *uint32: + e.e.EncodeUint(uint64(*v)) + case *uint64: + e.e.EncodeUint(*v) + case *float32: + e.e.EncodeFloat32(*v) + case *float64: + e.e.EncodeFloat64(*v) + + case *[]uint8: + e.e.EncodeStringBytes(c_RAW, *v) + + default: + const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer + if !fastpathEncodeTypeSwitch(iv, e) { + e.encodeI(iv, false, checkCodecSelfer1) + } + } +} + +func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) { + // use a goto statement instead of a recursive function for ptr/interface. +TOP: + switch rv.Kind() { + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
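+			//
+			// [Editor's note] Hedged illustration, not upstream code, of the case
+			// CheckCircularRef is meant to catch: a pointer graph that reaches back
+			// to itself. The type name node is hypothetical.
+			//
+			//	type node struct{ Next *node }
+			//	n := &node{}
+			//	n.Next = n // self-reference
+			//	// With CheckCircularRef set on the handle, encoding n is expected to
+			//	// fail fast with the "circular reference found" error raised in
+			//	// doEncodeValue below, instead of recursing indefinitely.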
+ sptr = rv.UnsafeAddr() + break TOP + } + goto TOP + case reflect.Interface: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + goto TOP + case reflect.Slice, reflect.Map: + if rv.IsNil() { + e.e.EncodeNil() + return + } + case reflect.Invalid, reflect.Func: + e.e.EncodeNil() + return + } + + proceed = true + rv2 = rv + return +} + +func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr, + checkFastpath, checkCodecSelfer bool) { + if sptr != 0 { + if (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } + } + if fn == nil { + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + // fn = e.getEncFn(rtid, rt, true, true) + fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) + } + fn.f(&fn.i, rv) + if sptr != 0 { + (&e.ci).remove(sptr) + } +} + +func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { + if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { + e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer) + } +} + +func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + if rv, sptr, proceed := e.preEncodeValue(rv); proceed { + e.doEncodeValue(rv, fn, sptr, true, true) + } +} + +func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) { + // rtid := reflect.ValueOf(rt).Pointer() + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i := range e.s { + v := &(e.s[i]) + if v.rtid == rtid { + fn, ok = &(v.fn), true + break + } + } + } + if ok { + return + } + + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]*encFn, initCollectionCap) + } + fn = new(encFn) + e.f[rtid] = fn + } else { + if e.s == nil { + e.s = make([]encRtidFn, 0, initCollectionCap) + } + e.s = append(e.s, encRtidFn{rtid: rtid}) + fn = &(e.s[len(e.s)-1]).fn + } + + ti := e.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.e = e + fi.ti = ti + + if checkCodecSelfer && ti.cs { + fn.f = (*encFnInfo).selferMarshal + } else if rtid == rawTypId { + fn.f = (*encFnInfo).raw + } else if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.IsBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfFn := e.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.f = (*encFnInfo).ext + } else if supportMarshalInterfaces && e.be && ti.bm { + fn.f = (*encFnInfo).binaryMarshal + } else if supportMarshalInterfaces && !e.be && e.js && ti.jm { + //If JSON, we should check JSONMarshal before textMarshal + fn.f = (*encFnInfo).jsonMarshal + } else if supportMarshalInterfaces && !e.be && ti.tm { + fn.f = (*encFnInfo).textMarshal + } else { + rk := rt.Kind() + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.f = fastpathAV[idx].encfn + } + } else { + ok = false + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := reflect.ValueOf(rtu).Pointer() + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.f = func(xf *encFnInfo, xrv reflect.Value) { + xfnf(xf, xrv.Convert(xrt)) + } + } + } + } + if fn.f == nil { + switch rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case 
reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Chan: + fi.seq = seqTypeChan + fn.f = (*encFnInfo).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.f = (*encFnInfo).kSlice + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + // case reflect.Ptr: + // fn.f = (*encFnInfo).kPtr + // case reflect.Interface: + // fn.f = (*encFnInfo).kInterface + case reflect.Map: + fn.f = (*encFnInfo).kMap + default: + fn.f = (*encFnInfo).kErr + } + } + } + + return +} + +func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + e.e.EncodeNil() + } else if asis { + e.asis(bs) + } else { + e.e.EncodeStringBytes(c, bs) + } +} + +func (e *Encoder) asis(v []byte) { + if e.as == nil { + e.w.writeb(v) + } else { + e.as.EncodeAsis(v) + } +} + +func (e *Encoder) raw(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) + } + if e.as == nil { + e.w.writeb(v) + } else { + e.as.EncodeAsis(v) + } +} + +func (e *Encoder) errorf(format string, params ...interface{}) { + err := fmt.Errorf(format, params...) + panic(err) +} + +// ---------------------------------------- + +const encStructPoolLen = 5 + +// encStructPool is an array of sync.Pool. +// Each element of the array pools one of encStructPool(8|16|32|64). +// It allows the re-use of slices up to 64 in length. +// A performance cost of encoding structs was collecting +// which values were empty and should be omitted. +// We needed slices of reflect.Value and string to collect them. +// This shared pool reduces the amount of unnecessary creation we do. +// The cost is that of locking sometimes, but sync.Pool is efficient +// enough to reduce thread contention. +var encStructPool [encStructPoolLen]sync.Pool + +func init() { + encStructPool[0].New = func() interface{} { return new([8]stringRv) } + encStructPool[1].New = func() interface{} { return new([16]stringRv) } + encStructPool[2].New = func() interface{} { return new([32]stringRv) } + encStructPool[3].New = func() interface{} { return new([64]stringRv) } + encStructPool[4].New = func() interface{} { return new([128]stringRv) } +} + +func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) { + // if encStructPoolLen != 5 { // constant chec, so removed at build time. 
+ // panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed + // } + // idxpool := newlen / 8 + if newlen <= 8 { + p = &encStructPool[0] + v = p.Get() + s = v.(*[8]stringRv)[:newlen] + } else if newlen <= 16 { + p = &encStructPool[1] + v = p.Get() + s = v.(*[16]stringRv)[:newlen] + } else if newlen <= 32 { + p = &encStructPool[2] + v = p.Get() + s = v.(*[32]stringRv)[:newlen] + } else if newlen <= 64 { + p = &encStructPool[3] + v = p.Get() + s = v.(*[64]stringRv)[:newlen] + } else if newlen <= 128 { + p = &encStructPool[4] + v = p.Get() + s = v.(*[128]stringRv)[:newlen] + } else { + s = make([]stringRv, newlen) + } + return +} + +// ---------------------------------------- + +// func encErr(format string, params ...interface{}) { +// doPanic(msgTagEnc, format, params...) +// } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go new file mode 100644 index 0000000..f2e5d2d --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go @@ -0,0 +1,39352 @@ +// +build !notfastpath + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl +// ************************************************************ + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register then in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently support +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
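+//
+// [Editor's note] Hedged illustration, not part of the generated file: only
+// un-named builtin slice and map types reach these functions directly; a named
+// type with the same underlying type is routed through the fast path via a
+// conversion in getEncFn (see encode.go above).
+//
+//	var h JsonHandle
+//	var out []byte
+//	NewEncoderBytes(&out, &h).MustEncode(map[string]int{"a": 1}) // direct fast path
+//
+//	type MyMap map[string]int // hypothetical named type
+//	NewEncoderBytes(&out, &h).MustEncode(MyMap{"a": 1}) // fast path via underlying-type conversion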
+// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +const fastpathCheckNilFalse = false // for reflect +const fastpathCheckNilTrue = true // for type switch + +type fastpathT struct{} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*encFnInfo, reflect.Value) + decfn func(*decFnInfo, reflect.Value) +} + +type fastpathA [271]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + h, i, j := 0, 0, 271 // len(x) + for i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < 271 && x[i].rtid == rtid { + return i + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } +func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + i := 0 + fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { + xrt := reflect.TypeOf(v) + xptr := reflect.ValueOf(xrt).Pointer() + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + return + } + + fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR) + fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR) + fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R) + fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R) + fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR) + fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R) + fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R) + fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR) + fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR) + fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R) + fn([]int16(nil), (*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R) + fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R) + fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R) + fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR) + + fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R) + 
fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R) + fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, 
(*decFnInfo).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR) + fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, (*decFnInfo).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), (*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, (*decFnInfo).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), 
(*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R) + fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R) + fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, (*decFnInfo).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R) + 
fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R) + fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, (*decFnInfo).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, 
(*decFnInfo).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR) + fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR) + fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR) + fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR) + fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R) + fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R) + fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R) + fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, (*decFnInfo).fastpathDecMapUintptrUint64R) + fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR) + fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR) + fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R) + fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R) + fn(map[uintptr]int32(nil), (*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R) + fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R) + fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R) + fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R) + fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR) + fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR) + fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR) + fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R) + fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R) + fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R) + fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, 
(*decFnInfo).fastpathDecMapIntUint64R) + fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR) + fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR) + fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R) + fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R) + fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R) + fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R) + fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R) + fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR) + fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, (*decFnInfo).fastpathDecMapInt8IntfR) + fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR) + fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR) + fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R) + fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R) + fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R) + fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R) + fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR) + fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR) + fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R) + fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R) + fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R) + fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R) + fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R) + fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R) + fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR) + fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR) + fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR) + fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR) + fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R) + fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R) + fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R) + fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R) + fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR) + fn(map[int16]int(nil), 
(*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR) + fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R) + fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R) + fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R) + fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R) + fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R) + fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R) + fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR) + fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, (*decFnInfo).fastpathDecMapInt32StringR) + fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR) + fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R) + fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R) + fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R) + fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R) + fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR) + fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR) + fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R) + fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R) + fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R) + fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R) + fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, (*decFnInfo).fastpathDecMapInt32Float32R) + fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR) + fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR) + fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR) + fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR) + fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R) + fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R) + fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R) + fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R) + fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR) + fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR) + 
+	fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R)
+	fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R)
+	fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R)
+	fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R)
+	fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R)
+	fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R)
+	fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR)
+	fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR)
+	fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR)
+	fn(map[bool]uint(nil), (*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR)
+	fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R)
+	fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R)
+	fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R)
+	fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R)
+	fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR)
+	fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR)
+	fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R)
+	fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R)
+	fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R)
+	fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R)
+	fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R)
+	fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, (*decFnInfo).fastpathDecMapBoolFloat64R)
+	fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR)
+
+	sort.Sort(fastpathAslice(fastpathAV[:]))
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+	switch v := iv.(type) {
+
+	case []interface{}:
+		fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+	case *[]interface{}:
+		fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]interface{}:
+		fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]interface{}:
+		fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]string:
+		fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]string:
+		fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]uint:
+		fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]uint:
+		fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]uint8:
+		fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]uint8:
+		fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]uint16:
fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e) + + case []string: + fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e) + + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e) + + case map[string]string: + fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e) + + case map[string]uint: + fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e) + + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e) + + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e) + + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e) + + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e) + + case 
map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e) + + case map[string]int: + fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e) + + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e) + case *map[string]int8: + fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e) + + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e) + + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e) + + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e) + + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e) + + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e) + + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e) + + case []float32: + fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e) + + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e) + + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e) + case *map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e) + + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e) + + case map[float32]int16: + 
fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e) + + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e) + + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e) + + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, fastpathCheckNilTrue, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e) + + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e) + + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e) + + case []float64: + fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e) + + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e) + + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e) + case *map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e) + case *map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e) + + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e) + + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e) + + case map[float64]int32: + fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e) + + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e) + + case 
map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e) + + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e) + + case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e) + + case []uint: + fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e) + case *[]uint: + fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e) + + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e) + + case map[uint]string: + fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e) + + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e) + + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e) + + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint]int: + fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e) + + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e) + + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e) + + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e) + + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e) + + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e) + + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e) + + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e) + + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e) + + 
case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e) + + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e) + case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e) + + case []uint16: + fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e) + case *[]uint16: + fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint16: + 
fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e) + + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e) + case *map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) + case *map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) + + case []uint32: + fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e) + case *[]uint32: + fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, 
fastpathCheckNilTrue, e) + + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) + + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) + case *map[uint32]float64: + fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) + + case []uint64: + fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e) + case *[]uint64: + fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) + + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) + case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int32: + 
fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e) + case *map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) + case *map[uint64]float32: + fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) + case *map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) + + case []uintptr: + fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e) + case *[]uintptr: + fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) + + case 
map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) + + case []int: + fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e) + case *[]int: + fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e) + + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) + + case map[int]string: + fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) + + case map[int]uint: + fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) + + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) + + case map[int]uint16: + fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) + + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) + + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) + + case map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) + + case map[int]int: + fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) + + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) + + case map[int]int16: + fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) + + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) + + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) + + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) + + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) + + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) + + case []int8: + fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e) + case *[]int8: + fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e) + + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) + + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) + case *map[int8]string: + 
fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) + + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) + + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) + + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) + + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) + + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) + + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) + case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) + + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) + + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) + + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) + + case []int16: + fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e) + case *[]int16: + fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e) + + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) + + case map[int16]string: + fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) + + case map[int16]uint: + fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) + + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) + case *map[int16]uint32: + 
fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) + + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) + + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) + + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) + + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) + + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) + + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) + + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) + + case []int32: + fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e) + case *[]int32: + fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e) + + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) + + case map[int32]string: + fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) + + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) 
+ case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) + + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) + + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) + + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) + + case map[int32]float32: + fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) + + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) + + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) + + case []int64: + fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e) + case *[]int64: + fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e) + + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) + + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) + + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) + + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) + + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) + + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) + + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, 
fastpathCheckNilTrue, e)
+	case *map[int64]float32:
+		fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e)
+
+	case map[int64]float64:
+		fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e)
+	case *map[int64]float64:
+		fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e)
+
+	case map[int64]bool:
+		fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e)
+	case *map[int64]bool:
+		fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e)
+
+	case []bool:
+		fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+	case *[]bool:
+		fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]interface{}:
+		fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e)
+	case *map[bool]interface{}:
+		fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]string:
+		fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e)
+	case *map[bool]string:
+		fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]uint:
+		fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e)
+	case *map[bool]uint:
+		fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]uint8:
+		fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e)
+	case *map[bool]uint8:
+		fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]uint16:
+		fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e)
+	case *map[bool]uint16:
+		fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]uint32:
+		fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e)
+	case *map[bool]uint32:
+		fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]uint64:
+		fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e)
+	case *map[bool]uint64:
+		fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]uintptr:
+		fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e)
+	case *map[bool]uintptr:
+		fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]int:
+		fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e)
+	case *map[bool]int:
+		fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]int8:
+		fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e)
+	case *map[bool]int8:
+		fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]int16:
+		fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e)
+	case *map[bool]int16:
+		fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]int32:
+		fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e)
+	case *map[bool]int32:
+		fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]int64:
+		fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e)
+	case *map[bool]int64:
+		fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]float32:
+		fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e)
+	case *map[bool]float32:
+		fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]float64:
+		fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e)
+	case *map[bool]float64:
+		fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e)
+
+	case map[bool]bool:
+		fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e)
+	case *map[bool]bool:
+		fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e)
+
+	default:
+		_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+		return false
+	}
+	return true
+}
+
+func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
+	switch v := iv.(type) {
+
+	case []interface{}:
+		fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e)
+	case *[]interface{}:
+		fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e)
+
+	case []string:
+		fastpathTV.EncSliceStringV(v, fastpathCheckNilTrue, e)
+	case *[]string:
+		fastpathTV.EncSliceStringV(*v, fastpathCheckNilTrue, e)
+
+	case []float32:
+		fastpathTV.EncSliceFloat32V(v, fastpathCheckNilTrue, e)
+	case *[]float32:
+		fastpathTV.EncSliceFloat32V(*v, fastpathCheckNilTrue, e)
+
+	case []float64:
+		fastpathTV.EncSliceFloat64V(v, fastpathCheckNilTrue, e)
+	case *[]float64:
+		fastpathTV.EncSliceFloat64V(*v, fastpathCheckNilTrue, e)
+
+	case []uint:
+		fastpathTV.EncSliceUintV(v, fastpathCheckNilTrue, e)
+	case *[]uint:
+		fastpathTV.EncSliceUintV(*v, fastpathCheckNilTrue, e)
+
+	case []uint16:
+		fastpathTV.EncSliceUint16V(v, fastpathCheckNilTrue, e)
+	case *[]uint16:
+		fastpathTV.EncSliceUint16V(*v, fastpathCheckNilTrue, e)
+
+	case []uint32:
+		fastpathTV.EncSliceUint32V(v, fastpathCheckNilTrue, e)
+	case *[]uint32:
+		fastpathTV.EncSliceUint32V(*v, fastpathCheckNilTrue, e)
+
+	case []uint64:
+		fastpathTV.EncSliceUint64V(v, fastpathCheckNilTrue, e)
+	case *[]uint64:
+		fastpathTV.EncSliceUint64V(*v, fastpathCheckNilTrue, e)
+
+	case []uintptr:
+		fastpathTV.EncSliceUintptrV(v, fastpathCheckNilTrue, e)
+	case *[]uintptr:
+		fastpathTV.EncSliceUintptrV(*v, fastpathCheckNilTrue, e)
+
+	case []int:
+		fastpathTV.EncSliceIntV(v, fastpathCheckNilTrue, e)
+	case *[]int:
+		fastpathTV.EncSliceIntV(*v, fastpathCheckNilTrue, e)
+
+	case []int8:
+		fastpathTV.EncSliceInt8V(v, fastpathCheckNilTrue, e)
+	case *[]int8:
+		fastpathTV.EncSliceInt8V(*v, fastpathCheckNilTrue, e)
+
+	case []int16:
+		fastpathTV.EncSliceInt16V(v, fastpathCheckNilTrue, e)
+	case *[]int16:
+		fastpathTV.EncSliceInt16V(*v, fastpathCheckNilTrue, e)
+
+	case []int32:
+		fastpathTV.EncSliceInt32V(v, fastpathCheckNilTrue, e)
+	case *[]int32:
+		fastpathTV.EncSliceInt32V(*v, fastpathCheckNilTrue, e)
+
+	case []int64:
+		fastpathTV.EncSliceInt64V(v, fastpathCheckNilTrue, e)
+	case *[]int64:
+		fastpathTV.EncSliceInt64V(*v, fastpathCheckNilTrue, e)
+
+	case []bool:
+		fastpathTV.EncSliceBoolV(v, fastpathCheckNilTrue, e)
+	case *[]bool:
+		fastpathTV.EncSliceBoolV(*v, fastpathCheckNilTrue, e)
+
+	default:
+		_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
+		return false
+	}
+	return true
+}
+
+func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
+	switch v := iv.(type) {
+
+	case map[interface{}]interface{}:
+		fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]interface{}:
+		fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]string:
+		fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]string:
+		fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]uint:
+		fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]uint:
+		fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]uint8:
+		fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]uint8:
+		fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]uint16:
+		fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]uint16:
+		fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e)
+
+	case map[interface{}]uint32:
+		fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e)
+	case *map[interface{}]uint32:
fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, fastpathCheckNilTrue, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, fastpathCheckNilTrue, e) + + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, fastpathCheckNilTrue, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, fastpathCheckNilTrue, e) + + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, fastpathCheckNilTrue, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, fastpathCheckNilTrue, e) + + case map[string]string: + fastpathTV.EncMapStringStringV(v, fastpathCheckNilTrue, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, fastpathCheckNilTrue, e) + + case map[string]uint: + fastpathTV.EncMapStringUintV(v, fastpathCheckNilTrue, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, fastpathCheckNilTrue, e) + + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, fastpathCheckNilTrue, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, fastpathCheckNilTrue, e) + + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, fastpathCheckNilTrue, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, fastpathCheckNilTrue, e) + + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, fastpathCheckNilTrue, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, fastpathCheckNilTrue, e) + + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, fastpathCheckNilTrue, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, fastpathCheckNilTrue, e) + + case map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, fastpathCheckNilTrue, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, fastpathCheckNilTrue, e) + + case map[string]int: + fastpathTV.EncMapStringIntV(v, fastpathCheckNilTrue, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, fastpathCheckNilTrue, e) + + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, fastpathCheckNilTrue, e) + case 
*map[string]int8: + fastpathTV.EncMapStringInt8V(*v, fastpathCheckNilTrue, e) + + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, fastpathCheckNilTrue, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, fastpathCheckNilTrue, e) + + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, fastpathCheckNilTrue, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, fastpathCheckNilTrue, e) + + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, fastpathCheckNilTrue, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, fastpathCheckNilTrue, e) + + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, fastpathCheckNilTrue, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, fastpathCheckNilTrue, e) + + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, fastpathCheckNilTrue, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, fastpathCheckNilTrue, e) + + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, fastpathCheckNilTrue, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, fastpathCheckNilTrue, e) + + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, fastpathCheckNilTrue, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, fastpathCheckNilTrue, e) + + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, fastpathCheckNilTrue, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, fastpathCheckNilTrue, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, fastpathCheckNilTrue, e) + + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, fastpathCheckNilTrue, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, fastpathCheckNilTrue, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, fastpathCheckNilTrue, e) + case *map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, fastpathCheckNilTrue, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, fastpathCheckNilTrue, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, fastpathCheckNilTrue, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, fastpathCheckNilTrue, e) + + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, fastpathCheckNilTrue, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, fastpathCheckNilTrue, e) + + case map[float32]int16: + fastpathTV.EncMapFloat32Int16V(v, fastpathCheckNilTrue, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, fastpathCheckNilTrue, e) + + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, fastpathCheckNilTrue, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, fastpathCheckNilTrue, e) + + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, fastpathCheckNilTrue, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, fastpathCheckNilTrue, e) + + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, 
fastpathCheckNilTrue, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, fastpathCheckNilTrue, e) + + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, fastpathCheckNilTrue, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, fastpathCheckNilTrue, e) + + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, fastpathCheckNilTrue, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, fastpathCheckNilTrue, e) + + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, fastpathCheckNilTrue, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, fastpathCheckNilTrue, e) + + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, fastpathCheckNilTrue, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, fastpathCheckNilTrue, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, fastpathCheckNilTrue, e) + + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, fastpathCheckNilTrue, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e) + case *map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e) + case *map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e) + + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e) + + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e) + + case map[float64]int32: + fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e) + + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e) + + case map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e) + + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e) + + case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e) + + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e) + + case 
map[uint]string: + fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e) + + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e) + + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e) + + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e) + + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint]int: + fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e) + + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e) + + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e) + + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e) + + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e) + + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e) + + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e) + + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e) + + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e) + + case 
map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e) + + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e) + case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e) + + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e) + + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e) + case 
*map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) + case *map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) + + case map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) + + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) + case *map[uint32]float64: + 
fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) + + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) + + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) + + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) + case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int32: + fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e) + case *map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) + case *map[uint64]float32: + fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) + + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) + case *map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) + + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint: + 
fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) + + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) + + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) + + case map[int]string: + fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) + + case map[int]uint: + fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) + + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) + + case map[int]uint16: + fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) + + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) + + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) + + 
case map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) + + case map[int]int: + fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) + + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) + + case map[int]int16: + fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) + + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) + + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) + + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) + + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, fastpathCheckNilTrue, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) + + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) + + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) + + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) + case *map[int8]string: + fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) + + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) + + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) + + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) + + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) + + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) + + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) 
+ case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) + + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) + + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) + + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) + + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) + + case map[int16]string: + fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) + + case map[int16]uint: + fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) + + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, fastpathCheckNilTrue, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) + case *map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) + + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) + + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) + + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) + + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) + + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) + + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) + + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) + + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) + + case map[int32]string: + 
fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) + + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) + + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) + + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) + case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) + + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) + + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) + + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) + + case map[int32]float32: + fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) + + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) + + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) + + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) + + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) + + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) + + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, 
fastpathCheckNilTrue, e) + + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) + + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) + + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) + + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) + + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) + + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) + + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) + + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) + + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) + + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) + + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) + + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) + + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) + + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) + + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) + + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) + case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) + + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) + + case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) + + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) + case *map[bool]int16: + 
fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) + + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) + + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) + case *map[bool]int64: + fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) + + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) + + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) + + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeString(c_UTF8, v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeString(c_UTF8, v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceFloat32R(rv 
reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeFloat32(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeFloat32(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeFloat64(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeFloat64(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + 
cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + 
cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for 
j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { + cr.sendContainerState(containerArrayElem) + } + ee.EncodeBool(v2) + } + if cr != nil { + cr.sendContainerState(containerArrayEnd) + } +} + +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeBool(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { + fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) { + fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else 
{ + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) { + fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) { + fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) { + fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } 
+ e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) { + fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) { + fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) { + fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != 
nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) { + fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) { + fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr 
!= nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) { + fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.asis(v2[j].v) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) { + fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + 
ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + 
ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) { + fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { 
+ cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + 
if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + 
sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := 
range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv.Interface().(map[float32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint16V(rv.Interface().(map[float32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv.Interface().(map[float32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv.Interface().(map[float32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) 
EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv.Interface().(map[float32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv.Interface().(map[float32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv.Interface().(map[float32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv.Interface().(map[float32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv.Interface().(map[float32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv.Interface().(map[float32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) { + 
fastpathTV.EncMapFloat32Float32V(rv.Interface().(map[float32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv.Interface().(map[float32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv.Interface().(map[float32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(float32(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat32(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv.Interface().(map[float64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := 
range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv.Interface().(map[float64]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv.Interface().(map[float64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv.Interface().(map[float64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv.Interface().(map[float64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint32V(rv.Interface().(map[float64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv.Interface().(map[float64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv.Interface().(map[float64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv.Interface().(map[float64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv.Interface().(map[float64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv.Interface().(map[float64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr 
!= nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv.Interface().(map[float64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv.Interface().(map[float64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv.Interface().(map[float64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) { + fastpathTV.EncMapFloat64Float64V(rv.Interface().(map[float64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if 
checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv.Interface().(map[float64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(float64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeFloat64(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv.Interface().(map[uint]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv.Interface().(map[uint]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) { + fastpathTV.EncMapUintUintV(rv.Interface().(map[uint]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv.Interface().(map[uint]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv.Interface().(map[uint]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv.Interface().(map[uint]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) { + 
ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range 
v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) { + fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) { + ee 
:= e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint(k2)]) + } + } else { + for k2, v2 := 
range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, 
checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) { + 
fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != 
nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) 
fastpathEncMapUint16StringR(rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] 
= uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + 
ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != 
nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32StringR(rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e 
*Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) { + 
fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { 
+ if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) { + fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + 
cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) { + fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, 
len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) { + fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && 
v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uint64(k2)]) + } + } else { 
+ for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(uint64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeUint(uint64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrStringV(v 
map[uintptr]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) { + fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) { + fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) { + fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) 
EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt8V(rv.Interface().(map[uintptr]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt16V(rv.Interface().(map[uintptr]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt32V(rv.Interface().(map[uintptr]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) { + fastpathTV.EncMapUintptrInt64V(rv.Interface().(map[uintptr]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) { + fastpathTV.EncMapUintptrFloat32V(rv.Interface().(map[uintptr]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) { + fastpathTV.EncMapUintptrFloat64V(rv.Interface().(map[uintptr]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) { + fastpathTV.EncMapUintptrBoolV(rv.Interface().(map[uintptr]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) 
EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(uintptr(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + e.encode(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) { + fastpathTV.EncMapIntIntfV(rv.Interface().(map[int]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) { + fastpathTV.EncMapIntStringV(rv.Interface().(map[int]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) { + fastpathTV.EncMapIntUintV(rv.Interface().(map[int]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := 
range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) { + fastpathTV.EncMapIntUint8V(rv.Interface().(map[int]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) { + fastpathTV.EncMapIntUint16V(rv.Interface().(map[int]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) { + fastpathTV.EncMapIntUint32V(rv.Interface().(map[int]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) { + fastpathTV.EncMapIntUint64V(rv.Interface().(map[int]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) { + ee := e.e + 
cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) { + fastpathTV.EncMapIntUintptrV(rv.Interface().(map[int]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) { + fastpathTV.EncMapIntIntV(rv.Interface().(map[int]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt8R(rv reflect.Value) { + fastpathTV.EncMapIntInt8V(rv.Interface().(map[int]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) { + fastpathTV.EncMapIntInt16V(rv.Interface().(map[int]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) { + fastpathTV.EncMapIntInt32V(rv.Interface().(map[int]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) { + fastpathTV.EncMapIntInt64V(rv.Interface().(map[int]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) { + fastpathTV.EncMapIntFloat32V(rv.Interface().(map[int]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + 
ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) { + fastpathTV.EncMapIntFloat64V(rv.Interface().(map[int]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) { + fastpathTV.EncMapIntBoolV(rv.Interface().(map[int]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) { + fastpathTV.EncMapInt8IntfV(rv.Interface().(map[int8]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) { + fastpathTV.EncMapInt8StringV(rv.Interface().(map[int8]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) { + fastpathTV.EncMapInt8UintV(rv.Interface().(map[int8]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint8V(rv.Interface().(map[int8]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint16V(rv.Interface().(map[int8]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint32V(rv.Interface().(map[int8]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt8Uint64V(rv.Interface().(map[int8]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt8UintptrV(rv.Interface().(map[int8]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) { + fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) { + fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) { + fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) { + fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, 
len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) { + fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) { + fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) { + fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) { + fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int8(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) { + fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) { + fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) { + fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := 
range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } 
+ ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) { + fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) { + fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i 
int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) { + fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) { + fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) { + fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) { + fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) { + fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) { + fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int16(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) { + fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, 
len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) { + fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) { + fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + 
} + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + 
if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) { + fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) { + fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) { + fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) { + fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) { + fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) { + fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) { + fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + 
ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) { + fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int32(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) { + fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) { + fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != 
nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) { + fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return 
+ } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) { + fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) { + fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) { + fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) { + fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() 
+ return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) { + fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeInt(int64(int64(k2))) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } 
+ ee.EncodeInt(int64(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeString(c_UTF8, v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + 
v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
ee.EncodeUint(uint64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + e.encode(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) { + fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + 
sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeInt(int64(v2)) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat32(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) 
fastpathEncMapBoolFloat64R(rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeFloat64(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) { + fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(bool(k2)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + ee.EncodeBool(k2) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + ee.EncodeBool(v2) + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d) + case *[]interface{}: + v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]interface{}: + fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]interface{}: + v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]string: + fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]string: + v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint: + fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint: + v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint8: + fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint8: + v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint16: + fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint16: + v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case 
map[interface{}]uint32: + fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint32: + v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uint64: + fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uint64: + v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]uintptr: + fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]uintptr: + v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int: + fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int: + v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int8: + fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int8: + v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int16: + fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int16: + v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int32: + fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int32: + v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]int64: + fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]int64: + v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]float32: + fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]float32: + v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]float64: + fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]float64: + v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[interface{}]bool: + fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d) + case *map[interface{}]bool: + v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []string: + fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d) + case *[]string: + v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]interface{}: + fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d) + case *map[string]interface{}: + v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]string: + fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d) + case *map[string]string: + v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint: + fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d) + case *map[string]uint: + v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, 
d) + if changed2 { + *v = v2 + } + + case map[string]uint8: + fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint8: + v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint16: + fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint16: + v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint32: + fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint32: + v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uint64: + fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d) + case *map[string]uint64: + v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]uintptr: + fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[string]uintptr: + v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int: + fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d) + case *map[string]int: + v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int8: + fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d) + case *map[string]int8: + v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int16: + fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d) + case *map[string]int16: + v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int32: + fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d) + case *map[string]int32: + v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]int64: + fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d) + case *map[string]int64: + v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]float32: + fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[string]float32: + v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]float64: + fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[string]float64: + v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[string]bool: + fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d) + case *map[string]bool: + v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []float32: + fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d) + case *[]float32: + v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]interface{}: + fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[float32]interface{}: + v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d) + if 
changed2 { + *v = v2 + } + + case map[float32]string: + fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d) + case *map[float32]string: + v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint: + fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint: + v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint8: + fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint8: + v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint16: + fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint16: + v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint32: + fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint32: + v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uint64: + fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]uint64: + v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]uintptr: + fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[float32]uintptr: + v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int: + fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d) + case *map[float32]int: + v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int8: + fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int8: + v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int16: + fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int16: + v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int32: + fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int32: + v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]int64: + fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]int64: + v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]float32: + fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[float32]float32: + v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]float64: + fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[float32]float64: + v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float32]bool: + fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[float32]bool: + v2, changed2 := 
fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []float64: + fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d) + case *[]float64: + v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]interface{}: + fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d) + case *map[float64]interface{}: + v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]string: + fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d) + case *map[float64]string: + v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint: + fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint: + v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint8: + fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint8: + v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint16: + fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint16: + v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint32: + fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint32: + v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uint64: + fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]uint64: + v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]uintptr: + fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[float64]uintptr: + v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int: + fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d) + case *map[float64]int: + v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int8: + fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int8: + v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int16: + fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int16: + v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int32: + fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int32: + v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]int64: + fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]int64: + v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]float32: + fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d) + case 
*map[float64]float32: + v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]float64: + fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[float64]float64: + v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[float64]bool: + fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[float64]bool: + v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint: + fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d) + case *[]uint: + v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]interface{}: + fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint]interface{}: + v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]string: + fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d) + case *map[uint]string: + v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint: + fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint: + v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint8: + fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint8: + v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint16: + fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint16: + v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint32: + fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint32: + v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uint64: + fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]uint64: + v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]uintptr: + fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint]uintptr: + v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int: + fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d) + case *map[uint]int: + v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int8: + fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int8: + v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int16: + fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int16: + v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]int32: + fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int32: + v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { 
+ *v = v2 + } + + case map[uint]int64: + fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]int64: + v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]float32: + fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[uint]float32: + v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]float64: + fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[uint]float64: + v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint]bool: + fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint]bool: + v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]interface{}: + fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]interface{}: + v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]string: + fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]string: + v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint: + fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint: + v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint8: + fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint8: + v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint16: + fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint16: + v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint32: + fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint32: + v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uint64: + fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uint64: + v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]uintptr: + fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]uintptr: + v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int: + fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int: + v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int8: + fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int8: + v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int16: + fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int16: + v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int32: + fastpathTV.DecMapUint8Int32V(v, 
fastpathCheckNilFalse, false, d) + case *map[uint8]int32: + v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]int64: + fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]int64: + v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]float32: + fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]float32: + v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]float64: + fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint8]float64: + v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint8]bool: + fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint8]bool: + v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint16: + fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d) + case *[]uint16: + v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]interface{}: + fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]interface{}: + v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]string: + fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]string: + v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint: + fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint: + v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint8: + fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint8: + v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint16: + fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint16: + v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint32: + fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint32: + v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uint64: + fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uint64: + v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]uintptr: + fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]uintptr: + v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int: + fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int: + v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int8: + fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d) + 
case *map[uint16]int8: + v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int16: + fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int16: + v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int32: + fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int32: + v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]int64: + fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]int64: + v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]float32: + fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]float32: + v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]float64: + fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint16]float64: + v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint16]bool: + fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint16]bool: + v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint32: + fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d) + case *[]uint32: + v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]interface{}: + fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]interface{}: + v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]string: + fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]string: + v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint: + fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint: + v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint8: + fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint8: + v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint16: + fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint16: + v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint32: + fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint32: + v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uint64: + fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]uint64: + v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]uintptr: + fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d) + case 
*map[uint32]uintptr: + v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int: + fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int: + v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int8: + fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int8: + v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int16: + fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int16: + v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int32: + fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int32: + v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]int64: + fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]int64: + v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]float32: + fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]float32: + v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]float64: + fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint32]float64: + v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint32]bool: + fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint32]bool: + v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uint64: + fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d) + case *[]uint64: + v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]interface{}: + fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]interface{}: + v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]string: + fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]string: + v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint: + fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint: + v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint8: + fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint8: + v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint16: + fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint16: + v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint32: + fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint32: + v2, 
changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uint64: + fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uint64: + v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]uintptr: + fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]uintptr: + v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int: + fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int: + v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int8: + fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int8: + v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int16: + fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int16: + v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int32: + fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int32: + v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]int64: + fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]int64: + v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]float32: + fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]float32: + v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]float64: + fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[uint64]float64: + v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uint64]bool: + fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[uint64]bool: + v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []uintptr: + fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d) + case *[]uintptr: + v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]interface{}: + fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]interface{}: + v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]string: + fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]string: + v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint: + fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint: + v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint8: + fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint8: + v2, changed2 
:= fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint16: + fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint16: + v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint32: + fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint32: + v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uint64: + fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uint64: + v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]uintptr: + fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]uintptr: + v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int: + fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int: + v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int8: + fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int8: + v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int16: + fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int16: + v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int32: + fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int32: + v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]int64: + fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]int64: + v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]float32: + fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]float32: + v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]float64: + fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]float64: + v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[uintptr]bool: + fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d) + case *map[uintptr]bool: + v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int: + fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d) + case *[]int: + v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]interface{}: + fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d) + case *map[int]interface{}: + v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]string: + fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d) + case *map[int]string: + v2, changed2 := 
fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint: + fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d) + case *map[int]uint: + v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint8: + fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint8: + v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint16: + fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint16: + v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint32: + fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint32: + v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uint64: + fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, d) + case *map[int]uint64: + v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]uintptr: + fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int]uintptr: + v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int: + fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d) + case *map[int]int: + v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int8: + fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d) + case *map[int]int8: + v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int16: + fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d) + case *map[int]int16: + v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int32: + fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d) + case *map[int]int32: + v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]int64: + fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d) + case *map[int]int64: + v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]float32: + fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[int]float32: + v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]float64: + fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[int]float64: + v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int]bool: + fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d) + case *map[int]bool: + v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int8: + fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d) + case *[]int8: + v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]interface{}: + fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d) + case 
*map[int8]interface{}: + v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]string: + fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d) + case *map[int8]string: + v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint: + fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint: + v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint8: + fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint8: + v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint16: + fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint16: + v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint32: + fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint32: + v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uint64: + fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]uint64: + v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]uintptr: + fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int8]uintptr: + v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int: + fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d) + case *map[int8]int: + v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int8: + fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int8: + v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int16: + fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int16: + v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int32: + fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int32: + v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]int64: + fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]int64: + v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]float32: + fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int8]float32: + v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]float64: + fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int8]float64: + v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int8]bool: + fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int8]bool: + v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 
+ } + + case []int16: + fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d) + case *[]int16: + v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]interface{}: + fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int16]interface{}: + v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]string: + fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d) + case *map[int16]string: + v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint: + fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint: + v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint8: + fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint8: + v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint16: + fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint16: + v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint32: + fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint32: + v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uint64: + fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int16]uint64: + v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]uintptr: + fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int16]uintptr: + v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int: + fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d) + case *map[int16]int: + v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int8: + fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int8: + v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int16: + fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int16: + v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int32: + fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int32: + v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]int64: + fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int16]int64: + v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]float32: + fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int16]float32: + v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]float64: + fastpathTV.DecMapInt16Float64V(v, 
fastpathCheckNilFalse, false, d) + case *map[int16]float64: + v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int16]bool: + fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int16]bool: + v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int32: + fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d) + case *[]int32: + v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]interface{}: + fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int32]interface{}: + v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]string: + fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d) + case *map[int32]string: + v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint: + fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint: + v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint8: + fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint8: + v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint16: + fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint16: + v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint32: + fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint32: + v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uint64: + fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]uint64: + v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]uintptr: + fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int32]uintptr: + v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int: + fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d) + case *map[int32]int: + v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int8: + fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int8: + v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int16: + fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int16: + v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int32: + fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int32: + v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]int64: + fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]int64: + v2, changed2 := 
fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]float32: + fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int32]float32: + v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]float64: + fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int32]float64: + v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int32]bool: + fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int32]bool: + v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []int64: + fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d) + case *[]int64: + v2, changed2 := fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]interface{}: + fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, d) + case *map[int64]interface{}: + v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]string: + fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d) + case *map[int64]string: + v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint: + fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint: + v2, changed2 := fastpathTV.DecMapInt64UintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint8: + fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint8: + v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint16: + fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint16: + v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint32: + fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint32: + v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uint64: + fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]uint64: + v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]uintptr: + fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d) + case *map[int64]uintptr: + v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int: + fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d) + case *map[int64]int: + v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int8: + fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int8: + v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int16: + fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int16: + v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { 
+ *v = v2 + } + + case map[int64]int32: + fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int32: + v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]int64: + fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]int64: + v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]float32: + fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d) + case *map[int64]float32: + v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]float64: + fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d) + case *map[int64]float64: + v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[int64]bool: + fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d) + case *map[int64]bool: + v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case []bool: + fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d) + case *[]bool: + v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]interface{}: + fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d) + case *map[bool]interface{}: + v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]string: + fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d) + case *map[bool]string: + v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint: + fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint: + v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint8: + fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint8: + v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint16: + fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint16: + v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint32: + fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint32: + v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uint64: + fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d) + case *map[bool]uint64: + v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]uintptr: + fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d) + case *map[bool]uintptr: + v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int: + fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d) + case *map[bool]int: + v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int8: + fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d) + case 
*map[bool]int8: + v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int16: + fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d) + case *map[bool]int16: + v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int32: + fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d) + case *map[bool]int32: + v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]int64: + fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d) + case *map[bool]int64: + v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]float32: + fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d) + case *map[bool]float32: + v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]float64: + fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d) + case *map[bool]float64: + v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + case map[bool]bool: + fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d) + case *map[bool]bool: + v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]interface{}) + v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]interface{}) + fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecSliceIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]interface{}, xlen) + } + } else { + v = make([]interface{}, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + d.decode(&v[j]) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, nil) + slh.ElemContainerState(j) + d.decode(&v[j]) + } + } else if !canChange { + for ; j < containerLenS; j++ { + 
slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]interface{}, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, nil) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + d.decode(&v[j]) + + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]string) + v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]string) + fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) { + v, changed := f.DecSliceStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]string, xlen) + } + } else { + v = make([]string, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeString() + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, "") + slh.ElemContainerState(j) + v[j] = dd.DecodeString() + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]string, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, "") + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeString() + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]float32) + v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, 
!array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]float32) + fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) { + v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float32, xlen) + } + } else { + v = make([]float32, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = float32(dd.DecodeFloat(true)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = float32(dd.DecodeFloat(true)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]float32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = float32(dd.DecodeFloat(true)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]float64) + v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]float64) + fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen 
int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float64, xlen) + } + } else { + v = make([]float64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeFloat(false) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeFloat(false) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]float64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeFloat(false) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint) + v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint) + fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint, xlen) + } + } else { + v = make([]uint, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = 
make([]uint, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint(dd.DecodeUint(uintBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint16) + v, changed := fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint16) + fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint16, xlen) + } + } else { + v = make([]uint16, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint16(dd.DecodeUint(16)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint16(dd.DecodeUint(16)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]uint16, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint16(dd.DecodeUint(16)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint32) + v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint32) + fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) { + v, changed := 
f.DecSliceUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint32, xlen) + } + } else { + v = make([]uint32, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uint32(dd.DecodeUint(32)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uint32(dd.DecodeUint(32)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]uint32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = uint32(dd.DecodeUint(32)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uint64) + v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uint64) + fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint64, xlen) + } + } else { + v = make([]uint64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = 
len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeUint(64) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeUint(64) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]uint64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeUint(64) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]uintptr) + v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]uintptr) + fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uintptr, xlen) + } + } else { + v = make([]uintptr, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]uintptr, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = 
uintptr(dd.DecodeUint(uintBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int) + v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int) + fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) { + v, changed := f.DecSliceIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int, xlen) + } + } else { + v = make([]int, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int(dd.DecodeInt(intBitsize)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int(dd.DecodeInt(intBitsize)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int(dd.DecodeInt(intBitsize)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int8) + v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int8) + fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + 
slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int8, xlen) + } + } else { + v = make([]int8, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int8(dd.DecodeInt(8)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int8(dd.DecodeInt(8)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int8, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int8(dd.DecodeInt(8)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int16) + v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int16) + fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int16, xlen) + } + } else { + v = make([]int16, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int16(dd.DecodeInt(16)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int16(dd.DecodeInt(16)) + } + } else if 
!canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int16, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int16(dd.DecodeInt(16)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int32) + v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int32) + fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int32, xlen) + } + } else { + v = make([]int32, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = int32(dd.DecodeInt(32)) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = int32(dd.DecodeInt(32)) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int32, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = int32(dd.DecodeInt(32)) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]int64) + v, changed := 
fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]int64) + fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) { + v, changed := f.DecSliceInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int64, xlen) + } + } else { + v = make([]int64, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeInt(64) + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, 0) + slh.ElemContainerState(j) + v[j] = dd.DecodeInt(64) + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]int64, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeInt(64) + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { + vp := rv.Addr().Interface().(*[]bool) + v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]bool) + fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) { + v, changed := f.DecSliceBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) { + dd := d.d + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { + var xlen int + xlen, xtrunc = decInferLen(containerLenS, 
d.h.MaxInitLen, 1) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]bool, xlen) + } + } else { + v = make([]bool, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + v[j] = dd.DecodeBool() + } + if xtrunc { + for ; j < containerLenS; j++ { + v = append(v, false) + slh.ElemContainerState(j) + v[j] = dd.DecodeBool() + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() + if breakFound { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]bool, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, false) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { + v[j] = dd.DecodeBool() + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]interface{}) + v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]interface{}) + fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk interface{} + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]string) + v, changed := 
fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]string) + fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]string, xlen) + changed = true + } + + var mk interface{} + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint) + v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint) + fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint, xlen) + changed = true + } + + var mk interface{} + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + 
+func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint8) + v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint8) + fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]uint8, xlen) + changed = true + } + + var mk interface{} + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint16) + v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint16) + fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]uint16, xlen) + changed = true + } + + var mk interface{} + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint32) + v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint32) + fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]uint32, xlen) + changed = true + } + + var mk interface{} + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uint64) + v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uint64) + fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint64, xlen) + changed = true + } + + var mk interface{} + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; 
!dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]uintptr) + v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]uintptr) + fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uintptr, xlen) + changed = true + } + + var mk interface{} + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int) + v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int) + fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int, xlen) + changed = true + } + + var mk interface{} + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if 
cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int8) + v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int8) + fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]int8, xlen) + changed = true + } + + var mk interface{} + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int16) + v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int16) + fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]int16, xlen) + changed = true + } + + var mk interface{} + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if 
cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int32) + v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int32) + fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]int32, xlen) + changed = true + } + + var mk interface{} + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]int64) + v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]int64) + fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = 
make(map[interface{}]int64, xlen) + changed = true + } + + var mk interface{} + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]float32) + v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]float32) + fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]float32, xlen) + changed = true + } + + var mk interface{} + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]float64) + v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]float64) + fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && 
dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]float64, xlen) + changed = true + } + + var mk interface{} + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[interface{}]bool) + v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[interface{}]bool) + fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[interface{}]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]bool, xlen) + changed = true + } + + var mk interface{} + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]interface{}) + v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]interface{}) + fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntfV(v 
map[string]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[string]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk string + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]string) + v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]string) + fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool, + d *Decoder) (_ map[string]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]string, xlen) + changed = true + } + + var mk string + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint) + v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint) + fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) 
DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint, xlen) + changed = true + } + + var mk string + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint8) + v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint8) + fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]uint8, xlen) + changed = true + } + + var mk string + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint16) + v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint16) + fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint16, changed 
bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]uint16, xlen) + changed = true + } + + var mk string + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint32) + v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint32) + fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]uint32, xlen) + changed = true + } + + var mk string + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uint64) + v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uint64) + fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = 
true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint64, xlen) + changed = true + } + + var mk string + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]uintptr) + v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]uintptr) + fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[string]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uintptr, xlen) + changed = true + } + + var mk string + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int) + v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int) + fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int, xlen) + changed = true + } + + var mk string + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int8) + v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int8) + fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]int8, xlen) + changed = true + } + + var mk string + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int16) + v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int16) + fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]int16, xlen) + changed = true + } + + var mk string + var mv int16 + if 
containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int32) + v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int32) + fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]int32, xlen) + changed = true + } + + var mk string + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]int64) + v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]int64) + fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int64, xlen) + changed = true + } + + var mk string + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]float32) + v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]float32) + fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[string]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]float32, xlen) + changed = true + } + + var mk string + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]float64) + v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]float64) + fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[string]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]float64, xlen) + changed = true + } + + var mk string + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv 
= dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[string]bool) + v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[string]bool) + fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[string]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]bool, xlen) + changed = true + } + + var mk string + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeString() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]interface{}) + v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]interface{}) + fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + 
d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]string) + v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]string) + fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]string, xlen) + changed = true + } + + var mk float32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint) + v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint) + fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint, xlen) + changed = true + } + + var mk float32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil 
{ + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint8) + v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint8) + fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]uint8, xlen) + changed = true + } + + var mk float32 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint16) + v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint16) + fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]uint16, xlen) + changed = true + } + + var mk float32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if 
containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint32) + v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint32) + fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]uint32, xlen) + changed = true + } + + var mk float32 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uint64) + v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uint64) + fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint64, xlen) + changed = true + } + + var mk float32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; 
!dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]uintptr) + v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]uintptr) + fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uintptr, xlen) + changed = true + } + + var mk float32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int) + v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int) + fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int, xlen) + changed = true + } + + var mk float32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil 
{ + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int8) + v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int8) + fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]int8, xlen) + changed = true + } + + var mk float32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int16) + v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int16) + fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]int16, xlen) + changed = true + } + + var mk float32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int32) + v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int32) + fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]int32, xlen) + changed = true + } + + var mk float32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]int64) + v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]int64) + fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int64, xlen) + changed = true + } + + var mk float32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]float32) + v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]float32) + fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]float32, xlen) + changed = true + } + + var mk float32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]float64) + v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]float64) + fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]float64, xlen) + changed = true + } + + var mk float32 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float32]bool) + v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float32]bool) + fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[float32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]bool, xlen) + changed = true + } + + var mk float32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = float32(dd.DecodeFloat(true)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]interface{}) + v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]interface{}) + fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + 
if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]string) + v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]string) + fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]string, xlen) + changed = true + } + + var mk float64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint) + v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint) + fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint, xlen) + changed = true + } + + var mk float64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + 
} + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint8) + v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint8) + fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]uint8, xlen) + changed = true + } + + var mk float64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint16) + v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint16) + fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]uint16, xlen) + changed = true + } + + var mk float64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } 
+ } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint32) + v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint32) + fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]uint32, xlen) + changed = true + } + + var mk float64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uint64) + v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uint64) + fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint64, xlen) + changed = true + } + + var mk float64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return 
v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]uintptr) + v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]uintptr) + fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uintptr, xlen) + changed = true + } + + var mk float64 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int) + v, changed := fastpathTV.DecMapFloat64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int) + fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int, xlen) + changed = true + } + + var mk float64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) 
fastpathDecMapFloat64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int8) + v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int8) + fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]int8, xlen) + changed = true + } + + var mk float64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int16) + v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int16) + fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]int16, xlen) + changed = true + } + + var mk float64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[float64]int32) + v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int32) + fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]int32, xlen) + changed = true + } + + var mk float64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]int64) + v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]int64) + fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int64, xlen) + changed = true + } + + var mk float64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]float32) + v, changed := fastpathTV.DecMapFloat64Float32V(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]float32) + fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]float32, xlen) + changed = true + } + + var mk float64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]float64) + v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]float64) + fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]float64, xlen) + changed = true + } + + var mk float64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[float64]bool) + v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if 
changed { + *vp = v + } + } else { + v := rv.Interface().(map[float64]bool) + fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[float64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]bool, xlen) + changed = true + } + + var mk float64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeFloat(false) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]interface{}) + v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]interface{}) + fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]string) + v, changed := fastpathTV.DecMapUintStringV(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]string) + fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]string, xlen) + changed = true + } + + var mk uint + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint) + v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint) + fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint, xlen) + changed = true + } + + var mk uint + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint8) + v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint8) + 
fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]uint8, xlen) + changed = true + } + + var mk uint + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint16) + v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint16) + fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]uint16, xlen) + changed = true + } + + var mk uint + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint32) + v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint32) + fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) 
DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]uint32, xlen) + changed = true + } + + var mk uint + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uint64) + v, changed := fastpathTV.DecMapUintUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uint64) + fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint64, xlen) + changed = true + } + + var mk uint + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]uintptr) + v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]uintptr) + fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) { + v, changed := 
f.DecMapUintUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uintptr, xlen) + changed = true + } + + var mk uint + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int) + v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int) + fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int, xlen) + changed = true + } + + var mk uint + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int8) + v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int8) + fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt8V(v 
map[uint]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]int8, xlen) + changed = true + } + + var mk uint + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int16) + v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int16) + fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]int16, xlen) + changed = true + } + + var mk uint + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int32) + v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int32) + fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil 
&& dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]int32, xlen) + changed = true + } + + var mk uint + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]int64) + v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]int64) + fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int64, xlen) + changed = true + } + + var mk uint + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]float32) + v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]float32) + fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]float32, xlen) + changed = true + } + + var mk uint + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]float64) + v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]float64) + fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]float64, xlen) + changed = true + } + + var mk uint + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint]bool) + v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint]bool) + fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 
9) + v = make(map[uint]bool, xlen) + changed = true + } + + var mk uint + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]interface{}) + v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]interface{}) + fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint8 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]string) + v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]string) + fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]string, xlen) + changed = true + } + + var mk uint8 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint) + v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint) + fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint, xlen) + changed = true + } + + var mk uint8 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint8) + v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint8) + fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]uint8, xlen) + changed = true + } + + var mk uint8 + var mv uint8 + if 
containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint16) + v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint16) + fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]uint16, xlen) + changed = true + } + + var mk uint8 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint32) + v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint32) + fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]uint32, xlen) + changed = true + } + + var mk uint8 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uint64) + v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uint64) + fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint64, xlen) + changed = true + } + + var mk uint8 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]uintptr) + v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]uintptr) + fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uintptr, xlen) + changed = true + } + + var mk uint8 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int) + v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int) + fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int, xlen) + changed = true + } + + var mk uint8 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int8) + v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int8) + fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]int8, xlen) + changed = true + } + + var mk uint8 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + 
for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int16) + v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int16) + fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]int16, xlen) + changed = true + } + + var mk uint8 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int32) + v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int32) + fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]int32, xlen) + changed = true + } + + var mk uint8 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != 
nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]int64) + v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]int64) + fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int64, xlen) + changed = true + } + + var mk uint8 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]float32) + v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]float32) + fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]float32, xlen) + changed = true + } + + var mk uint8 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } 
+ } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]float64) + v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]float64) + fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]float64, xlen) + changed = true + } + + var mk uint8 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint8]bool) + v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint8]bool) + fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint8]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]bool, xlen) + changed = true + } + + var mk uint8 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint8(dd.DecodeUint(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16IntfR(rv 
reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]interface{}) + v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]interface{}) + fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint16 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]string) + v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]string) + fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]string, xlen) + changed = true + } + + var mk uint16 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, 
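// Editor's note: illustrative sketch, not part of the vendored file. The map[K]interface{}
// fastpaths above compute mapGet = !MapValueReset && !InterfaceReset and, when true, hand
// the existing value for a key back to the decoder so it can be filled in place rather than
// replaced by a fresh allocation. The names below (toyOpts, decodeInto, decodeMapValue) are
// hypothetical stand-ins for that pattern.
package main

import "fmt"

type toyOpts struct {
	MapValueReset  bool
	InterfaceReset bool
}

// decodeInto simulates d.decode(&mv): it reuses an existing []int if one was handed in,
// otherwise it allocates a new value.
func decodeInto(mv *interface{}, payload []int) {
	if existing, ok := (*mv).([]int); ok {
		*mv = append(existing[:0], payload...) // reuse the old backing array
		return
	}
	*mv = append([]int(nil), payload...)
}

func decodeMapValue(v map[uint16]interface{}, k uint16, payload []int, o toyOpts) {
	mapGet := !o.MapValueReset && !o.InterfaceReset
	var mv interface{}
	if mapGet {
		mv = v[k] // reuse whatever is already stored under this key
	}
	decodeInto(&mv, payload)
	v[k] = mv
}

func main() {
	v := map[uint16]interface{}{7: make([]int, 0, 8)}
	decodeMapValue(v, 7, []int{1, 2, 3}, toyOpts{})
	fmt.Println(v[7])
}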
changed +} + +func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint) + v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint) + fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint, xlen) + changed = true + } + + var mk uint16 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint8) + v, changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint8) + fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]uint8, xlen) + changed = true + } + + var mk uint16 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) { + if rv.CanAddr() 
{ + vp := rv.Addr().Interface().(*map[uint16]uint16) + v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint16) + fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]uint16, xlen) + changed = true + } + + var mk uint16 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint32) + v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint32) + fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]uint32, xlen) + changed = true + } + + var mk uint16 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uint64) + v, changed := 
fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uint64) + fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint64, xlen) + changed = true + } + + var mk uint16 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]uintptr) + v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]uintptr) + fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uintptr, xlen) + changed = true + } + + var mk uint16 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int) + v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, 
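// Editor's note: illustrative sketch, not part of the vendored file. Each fastpath above
// sizes its initial make(map...) with decInferLen(containerLen, MaxInitLen, bytesPerEntry),
// so a stream that claims a huge element count cannot force a huge up-front allocation.
// inferInitLen below is a hypothetical stand-in that follows the same idea; it is not the
// real implementation.
package main

import "fmt"

// inferInitLen clamps a stream-supplied length against a configurable cap.
func inferInitLen(containerLen, maxInitLen, unitSizeBytes int) int {
	if containerLen <= 0 {
		return 0 // unknown or indefinite length: start empty and let the map grow
	}
	if unitSizeBytes <= 0 {
		unitSizeBytes = 1
	}
	if maxInitLen <= 0 {
		// Assumed fallback for this sketch: budget roughly 4KB worth of entries.
		maxInitLen = 4 * 1024 / unitSizeBytes
		if maxInitLen < 8 {
			maxInitLen = 8
		}
	}
	if containerLen > maxInitLen {
		return maxInitLen
	}
	return containerLen
}

func main() {
	// A stream claiming 1<<30 uint16->uint64 pairs (10 bytes per entry in the generated
	// code) still only pre-allocates a bounded number of entries.
	fmt.Println(inferInitLen(1<<30, 0, 10))
	fmt.Println(inferInitLen(16, 0, 10))
}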
true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int) + fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int, xlen) + changed = true + } + + var mk uint16 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int8) + v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int8) + fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]int8, xlen) + changed = true + } + + var mk uint16 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int16) + v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int16) + fastpathTV.DecMapUint16Int16V(v, 
fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]int16, xlen) + changed = true + } + + var mk uint16 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int32) + v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int32) + fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]int32, xlen) + changed = true + } + + var mk uint16 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]int64) + v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]int64) + fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, 
checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int64, xlen) + changed = true + } + + var mk uint16 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]float32) + v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]float32) + fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]float32, xlen) + changed = true + } + + var mk uint16 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]float64) + v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]float64) + fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) { + v, changed := 
f.DecMapUint16Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]float64, xlen) + changed = true + } + + var mk uint16 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint16]bool) + v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint16]bool) + fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint16]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]bool, xlen) + changed = true + } + + var mk uint16 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint16(dd.DecodeUint(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]interface{}) + v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]interface{}) + fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) 
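// Editor's note: illustrative sketch, not part of the vendored file. Every fastpath above
// walks the map with one of two loop shapes: a counted loop when ReadMapStart returned a
// definite length (containerLen > 0), or a CheckBreak-terminated loop when the format uses
// an indefinite-length container (containerLen < 0). The toyStream type below is hypothetical.
package main

import "fmt"

type toyStream struct {
	pairs [][2]int
	pos   int
}

func (s *toyStream) CheckBreak() bool { return s.pos >= len(s.pairs) }
func (s *toyStream) next() (int, int) { p := s.pairs[s.pos]; s.pos++; return p[0], p[1] }

func decodePairs(s *toyStream, containerLen int) map[int]int {
	v := map[int]int{}
	if containerLen > 0 {
		for j := 0; j < containerLen; j++ { // definite length: trust the count
			k, val := s.next()
			v[k] = val
		}
	} else if containerLen < 0 {
		for !s.CheckBreak() { // indefinite length: read until the break marker
			k, val := s.next()
			v[k] = val
		}
	}
	return v
}

func main() {
	s := &toyStream{pairs: [][2]int{{1, 10}, {2, 20}}}
	fmt.Println(decodePairs(s, -1)) // indefinite-length path
}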
DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]string) + v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]string) + fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]string, xlen) + changed = true + } + + var mk uint32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint) + v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint) + fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32UintV(*vp, checkNil, true, d) + if changed 
{ + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint, xlen) + changed = true + } + + var mk uint32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint8) + v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint8) + fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]uint8, xlen) + changed = true + } + + var mk uint32 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint16) + v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint16) + fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, 
checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]uint16, xlen) + changed = true + } + + var mk uint32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint32) + v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint32) + fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]uint32, xlen) + changed = true + } + + var mk uint32 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uint64) + v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uint64) + fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uint64, 
changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint64, xlen) + changed = true + } + + var mk uint32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]uintptr) + v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]uintptr) + fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uintptr, xlen) + changed = true + } + + var mk uint32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int) + v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int) + fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if 
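// Editor's note: illustrative sketch, not part of the vendored file. The repeated
// `if cr != nil { cr.sendContainerState(...) }` guards above exist because only some
// formats need to be told where map keys, values, and the map end are (text formats such
// as JSON must handle ':' , ',' and '}'), while binary drivers leave cr nil and skip the
// calls entirely. The names below (toyContainerState, toyJSONReader) are hypothetical.
package main

import "fmt"

type toyContainerState int

const (
	toyMapKey toyContainerState = iota
	toyMapValue
	toyMapEnd
)

// toyJSONReader stands in for a driver that tracks separators between elements.
type toyJSONReader struct{ out []string }

func (r *toyJSONReader) sendContainerState(c toyContainerState) {
	switch c {
	case toyMapKey:
		r.out = append(r.out, "expect key (consume ',' if not first)")
	case toyMapValue:
		r.out = append(r.out, "expect value (consume ':')")
	case toyMapEnd:
		r.out = append(r.out, "expect '}'")
	}
}

func decodeOneEntry(cr *toyJSONReader) {
	// Mirrors the nil-guarded call sites in the generated code: binary-style drivers pass
	// cr == nil and pay nothing; text-style drivers get a callback before each key, each
	// value, and the container end.
	if cr != nil {
		cr.sendContainerState(toyMapKey)
	}
	// ... decode key ...
	if cr != nil {
		cr.sendContainerState(toyMapValue)
	}
	// ... decode value ...
	if cr != nil {
		cr.sendContainerState(toyMapEnd)
	}
}

func main() {
	r := &toyJSONReader{}
	decodeOneEntry(r)
	decodeOneEntry(nil) // binary-style driver: no container-state bookkeeping
	fmt.Println(r.out)
}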
v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int, xlen) + changed = true + } + + var mk uint32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int8) + v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int8) + fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]int8, xlen) + changed = true + } + + var mk uint32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int16) + v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int16) + fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && 
v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]int16, xlen) + changed = true + } + + var mk uint32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int32) + v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int32) + fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]int32, xlen) + changed = true + } + + var mk uint32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]int64) + v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]int64) + fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int64, xlen) + 
changed = true + } + + var mk uint32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]float32) + v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]float32) + fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]float32, xlen) + changed = true + } + + var mk uint32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]float64) + v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]float64) + fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]float64, xlen) + changed = true + } + + var mk uint32 + var mv float64 + 
if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint32]bool) + v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint32]bool) + fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]bool, xlen) + changed = true + } + + var mk uint32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uint32(dd.DecodeUint(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]interface{}) + v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]interface{}) + fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < 
containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]string) + v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]string) + fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]string, xlen) + changed = true + } + + var mk uint64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint) + v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint) + fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint, xlen) + changed = true + } + + var mk uint64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint8) + v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint8) + fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]uint8, xlen) + changed = true + } + + var mk uint64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint16) + v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint16) + fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]uint16, xlen) + changed = true + } + + var mk uint64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint32) + v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint32) + fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]uint32, xlen) + changed = true + } + + var mk uint64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uint64) + v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uint64) + fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint64, xlen) + changed = true + } + + var mk uint64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } 
+ } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]uintptr) + v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]uintptr) + fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uintptr, xlen) + changed = true + } + + var mk uint64 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int) + v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int) + fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int, xlen) + changed = true + } + + var mk uint64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int8) + v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int8) + fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]int8, xlen) + changed = true + } + + var mk uint64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int16) + v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int16) + fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]int16, xlen) + changed = true + } + + var mk uint64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = 
int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int32) + v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int32) + fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]int32, xlen) + changed = true + } + + var mk uint64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]int64) + v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]int64) + fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int64, xlen) + changed = true + } + + var mk uint64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} 
+ +func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]float32) + v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]float32) + fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]float32, xlen) + changed = true + } + + var mk uint64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]float64) + v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]float64) + fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]float64, xlen) + changed = true + } + + var mk uint64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uint64]bool) + v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uint64]bool) + fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uint64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]bool, xlen) + changed = true + } + + var mk uint64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeUint(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]interface{}) + v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]interface{}) + fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uintptr + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) 
fastpathDecMapUintptrStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]string) + v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]string) + fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]string, xlen) + changed = true + } + + var mk uintptr + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint) + v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint) + fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint, xlen) + changed = true + } + + var mk uintptr + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv 
reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint8) + v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint8) + fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]uint8, xlen) + changed = true + } + + var mk uintptr + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint16) + v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint16) + fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]uint16, xlen) + changed = true + } + + var mk uintptr + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint32) + v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint32) + fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]uint32, xlen) + changed = true + } + + var mk uintptr + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]uint64) + v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uint64) + fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint64, xlen) + changed = true + } + + var mk uintptr + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp 
:= rv.Addr().Interface().(*map[uintptr]uintptr) + v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]uintptr) + fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uintptr, xlen) + changed = true + } + + var mk uintptr + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int) + v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int) + fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int, xlen) + changed = true + } + + var mk uintptr + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[uintptr]int8) + v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int8) + fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]int8, xlen) + changed = true + } + + var mk uintptr + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int16) + v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int16) + fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]int16, xlen) + changed = true + } + + var mk uintptr + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int32) + v, changed 
:= fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int32) + fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]int32, xlen) + changed = true + } + + var mk uintptr + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]int64) + v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]int64) + fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int64, xlen) + changed = true + } + + var mk uintptr + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]float32) + v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, 
fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]float32) + fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]float32, xlen) + changed = true + } + + var mk uintptr + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]float64) + v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]float64) + fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]float64, xlen) + changed = true + } + + var mk uintptr + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[uintptr]bool) + v, changed := 
fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[uintptr]bool) + fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[uintptr]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]bool, xlen) + changed = true + } + + var mk uintptr + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = uintptr(dd.DecodeUint(uintBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]interface{}) + v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]interface{}) + fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := 
rv.Addr().Interface().(*map[int]string) + v, changed := fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]string) + fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]string, xlen) + changed = true + } + + var mk int + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint) + v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint) + fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintV(v map[int]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint, xlen) + changed = true + } + + var mk int + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint8) + v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { 
+ v := rv.Interface().(map[int]uint8) + fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]uint8, xlen) + changed = true + } + + var mk int + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint16) + v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint16) + fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]uint16, xlen) + changed = true + } + + var mk int + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint32) + v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint32) + fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) 
DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]uint32, xlen) + changed = true + } + + var mk int + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uint64) + v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uint64) + fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint64, xlen) + changed = true + } + + var mk int + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]uintptr) + v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]uintptr) + fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d) + if changed { 
+ *vp = v + } +} +func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uintptr, xlen) + changed = true + } + + var mk int + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int) + v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int) + fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int, xlen) + changed = true + } + + var mk int + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int8) + v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int8) + fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int8, changed bool) { + dd := 
d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]int8, xlen) + changed = true + } + + var mk int + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int16) + v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int16) + fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]int16, xlen) + changed = true + } + + var mk int + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int32) + v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int32) + fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange 
&& v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]int32, xlen) + changed = true + } + + var mk int + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]int64) + v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]int64) + fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int64, xlen) + changed = true + } + + var mk int + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]float32) + v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]float32) + fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]float32, xlen) + changed = true + } + + var mk int + var mv 
float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]float64) + v, changed := fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]float64) + fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]float64, xlen) + changed = true + } + + var mk int + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int]bool) + v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int]bool) + fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]bool, xlen) + changed = true + } + + var mk int + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int(dd.DecodeInt(intBitsize)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]interface{}) + v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]interface{}) + fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int8 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]string) + v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]string) + fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]string, xlen) + changed = true + } + + var mk int8 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + 
mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint) + v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint) + fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint, xlen) + changed = true + } + + var mk int8 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint8) + v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint8) + fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]uint8, xlen) + changed = true + } + + var mk int8 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if 
containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint16) + v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint16) + fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]uint16, xlen) + changed = true + } + + var mk int8 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint32) + v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint32) + fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]uint32, xlen) + changed = true + } + + var mk int8 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uint64) + v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uint64) + fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint64, xlen) + changed = true + } + + var mk int8 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]uintptr) + v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]uintptr) + fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uintptr, xlen) + changed = true + } + + var mk int8 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if 
v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int) + v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int) + fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int, xlen) + changed = true + } + + var mk int8 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int8) + v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int8) + fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]int8, xlen) + changed = true + } + + var mk int8 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int16) + v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int16) + fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]int16, xlen) + changed = true + } + + var mk int8 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int32) + v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]int32) + fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]int32, xlen) + changed = true + } + + var mk int8 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]int64) + v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } 
+ } else { + v := rv.Interface().(map[int8]int64) + fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int64, xlen) + changed = true + } + + var mk int8 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]float32) + v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]float32) + fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]float32, xlen) + changed = true + } + + var mk int8 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]float64) + v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]float64) + fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f 
fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]float64, xlen) + changed = true + } + + var mk int8 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int8]bool) + v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int8]bool) + fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int8]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]bool, xlen) + changed = true + } + + var mk int8 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int8(dd.DecodeInt(8)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]interface{}) + v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]interface{}) + fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} 
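The hunk above is part of codec's generated fast-path table: for each common map type (map[int]string, map[int8]uint64, map[int16]interface{}, and so on) there is a specialized DecMap...V function that reads the map header with ReadMapStart, sizes the allocation with decInferLen, and then decodes key/value pairs in a typed loop, using CheckBreak for indefinite-length containers. A minimal usage sketch, assuming the vendored github.com/ugorji/go/codec package and its MsgpackHandle (neither shown in this hunk), illustrating the kind of round trip that would exercise these fast paths:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // vendored package whose generated fast paths appear in this hunk
)

func main() {
	var mh codec.MsgpackHandle

	// Encode a small map; common map types such as map[int]string have generated fast paths.
	src := map[int]string{1: "one", 2: "two"}
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &mh).Encode(src); err != nil {
		panic(err)
	}

	// Decoding into *map[int]string is expected to dispatch to the DecMapIntString...
	// functions shown earlier in this file, decoding entries in a typed loop.
	dst := make(map[int]string)
	if err := codec.NewDecoderBytes(buf, &mh).Decode(&dst); err != nil {
		panic(err)
	}
	fmt.Println(dst) // map[1:one 2:two]
}

The design choice visible in the generated code is to trade binary size for speed: every supported key/value combination gets its own monomorphic decode loop, so the per-entry work stays on concrete types rather than going through reflection.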
+func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int16 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]string) + v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]string) + fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]string, xlen) + changed = true + } + + var mk int16 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint) + v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint) + fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d) + if changed { + *vp = v + 
} +} +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint, xlen) + changed = true + } + + var mk int16 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint8) + v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint8) + fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]uint8, xlen) + changed = true + } + + var mk int16 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint16) + v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint16) + fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool, + d *Decoder) (_ 
map[int16]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]uint16, xlen) + changed = true + } + + var mk int16 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint32) + v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint32) + fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]uint32, xlen) + changed = true + } + + var mk int16 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uint64) + v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uint64) + fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v 
!= nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint64, xlen) + changed = true + } + + var mk int16 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]uintptr) + v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]uintptr) + fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uintptr, xlen) + changed = true + } + + var mk int16 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int) + v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int) + fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil 
{ + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int, xlen) + changed = true + } + + var mk int16 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int8) + v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int8) + fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]int8, xlen) + changed = true + } + + var mk int16 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int16) + v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int16) + fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]int16, xlen) + changed = true + } + + var mk int16 + var mv int16 + 
if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int32) + v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int32) + fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]int32, xlen) + changed = true + } + + var mk int16 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]int64) + v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]int64) + fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int64, xlen) + changed = true + } + + var mk int16 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = 
int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]float32) + v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]float32) + fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]float32, xlen) + changed = true + } + + var mk int16 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]float64) + v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]float64) + fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]float64, xlen) + changed = true + } + + var mk int16 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + 
mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int16]bool) + v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int16]bool) + fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int16]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]bool, xlen) + changed = true + } + + var mk int16 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int16(dd.DecodeInt(16)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]interface{}) + v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]interface{}) + fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int32 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + 
v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]string) + v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]string) + fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]string, xlen) + changed = true + } + + var mk int32 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint) + v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint) + fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint, xlen) + changed = true + } + + var mk int32 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil 
{ + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint8) + v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint8) + fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]uint8, xlen) + changed = true + } + + var mk int32 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint16) + v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint16) + fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]uint16, xlen) + changed = true + } + + var mk int32 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + 
cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint32) + v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint32) + fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]uint32, xlen) + changed = true + } + + var mk int32 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uint64) + v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uint64) + fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint64, xlen) + changed = true + } + + var mk int32 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + 
if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]uintptr) + v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]uintptr) + fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uintptr, xlen) + changed = true + } + + var mk int32 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int) + v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int) + fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int, xlen) + changed = true + } + + var mk int32 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f 
*decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int8) + v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int8) + fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]int8, xlen) + changed = true + } + + var mk int32 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int16) + v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int16) + fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]int16, xlen) + changed = true + } + + var mk int32 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int32) + v, changed := 
fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int32) + fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]int32, xlen) + changed = true + } + + var mk int32 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]int64) + v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]int64) + fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int64, xlen) + changed = true + } + + var mk int32 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]float32) + v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := 
rv.Interface().(map[int32]float32) + fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]float32, xlen) + changed = true + } + + var mk int32 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]float64) + v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]float64) + fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]float64, xlen) + changed = true + } + + var mk int32 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int32]bool) + v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int32]bool) + fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, 
false, f.d) + } +} +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int32]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]bool, xlen) + changed = true + } + + var mk int32 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = int32(dd.DecodeInt(32)) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]interface{}) + v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]interface{}) + fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int64 + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]string) + v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]string) + fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, 
f.d) + } +} +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]string, xlen) + changed = true + } + + var mk int64 + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint) + v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint) + fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint, xlen) + changed = true + } + + var mk int64 + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint8) + v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint8) + fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d) + if 
changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]uint8, xlen) + changed = true + } + + var mk int64 + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint16) + v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint16) + fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]uint16, xlen) + changed = true + } + + var mk int64 + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint32) + v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint32) + fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, checkNil bool, canChange bool, + d *Decoder) (_ 
map[int64]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]uint32, xlen) + changed = true + } + + var mk int64 + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uint64) + v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uint64) + fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint64, xlen) + changed = true + } + + var mk int64 + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]uintptr) + v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]uintptr) + fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } 
+ return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uintptr, xlen) + changed = true + } + + var mk int64 + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int) + v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int) + fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int, xlen) + changed = true + } + + var mk int64 + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int8) + v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int8) + fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = 
make(map[int64]int8, xlen) + changed = true + } + + var mk int64 + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int16) + v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int16) + fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]int16, xlen) + changed = true + } + + var mk int64 + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int32) + v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int32) + fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]int32, xlen) + changed = true + } + + var mk int64 + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]int64) + v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]int64) + fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int64, xlen) + changed = true + } + + var mk int64 + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]float32) + v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]float32) + fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]float32, xlen) + changed = true + } + + var mk int64 + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = 
float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]float64) + v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]float64) + fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]float64, xlen) + changed = true + } + + var mk int64 + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[int64]bool) + v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[int64]bool) + fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[int64]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]bool, xlen) + changed = true + } + + var mk int64 + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeInt(64) + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]interface{}) + v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]interface{}) + fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]interface{}, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]interface{}, xlen) + changed = true + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk bool + var mv interface{} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolStringR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]string) + v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]string) + fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]string, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]string, xlen) + changed = true + } + + var mk bool + var mv string + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + 
cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint) + v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint) + fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint, xlen) + changed = true + } + + var mk bool + var mv uint + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint8) + v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint8) + fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]uint8, xlen) + changed = true + } + + var mk bool + var mv uint8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + 
} + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint16) + v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint16) + fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]uint16, xlen) + changed = true + } + + var mk bool + var mv uint16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint32) + v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint32) + fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]uint32, xlen) + changed = true + } + + var mk bool + var mv uint32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) { + if 
rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uint64) + v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uint64) + fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uint64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint64, xlen) + changed = true + } + + var mk bool + var mv uint64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]uintptr) + v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]uintptr) + fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]uintptr, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uintptr, xlen) + changed = true + } + + var mk bool + var mv uintptr + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int) + v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d) + if 
changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int) + fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int, xlen) + changed = true + } + + var mk bool + var mv int + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int8) + v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int8) + fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int8, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]int8, xlen) + changed = true + } + + var mk bool + var mv int8 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int16) + v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int16) + fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d 
*Decoder) { + v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int16, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]int16, xlen) + changed = true + } + + var mk bool + var mv int16 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int32) + v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int32) + fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]int32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]int32, xlen) + changed = true + } + + var mk bool + var mv int32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]int64) + v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]int64) + fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool, + d 
*Decoder) (_ map[bool]int64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int64, xlen) + changed = true + } + + var mk bool + var mv int64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]float32) + v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]float32) + fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]float32, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]float32, xlen) + changed = true + } + + var mk bool + var mv float32 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]float64) + v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]float64) + fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]float64, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + 
return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]float64, xlen) + changed = true + } + + var mk bool + var mv float64 + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} + +func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[bool]bool) + v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[bool]bool) + fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) { + v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool, + d *Decoder) (_ map[bool]bool, changed bool) { + dd := d.d + cr := d.cr + + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]bool, xlen) + changed = true + } + + var mk bool + var mv bool + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { + cr.sendContainerState(containerMapKey) + } + mk = dd.DecodeBool() + if cr != nil { + cr.sendContainerState(containerMapValue) + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } + return v, changed +} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl new file mode 100644 index 0000000..c3ffdf9 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl @@ -0,0 +1,527 @@ +// +build !notfastpath + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl +// ************************************************************ + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register then in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there. +// This file can be omitted without causing a build failure. 
+// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently support +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. +// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +const fastpathCheckNilFalse = false // for reflect +const fastpathCheckNilTrue = true // for type switch + +type fastpathT struct {} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*encFnInfo, reflect.Value) + decfn func(*decFnInfo, reflect.Value) +} + +type fastpathA [{{ .FastpathLen }}]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + h, i, j := 0, 0, {{ .FastpathLen }} // len(x) + for i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < {{ .FastpathLen }} && x[i].rtid == rtid { + return i + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } +func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + i := 0 + fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { + xrt := reflect.TypeOf(v) + xptr := reflect.ValueOf(xrt).Pointer() + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + return + } + + {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + + {{range .Values}}{{if not .Primitive}}{{if .MapKey }} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}:{{else}} + case map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }} + case *[]{{ .Elem }}:{{else}} + case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) +{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) + case *[]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) +{{end}}{{end}}{{end}} 
+ default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) + case *map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) +{{end}}{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + +func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { + if f.ti.mbs { + fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + } +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeArrayStart(len(v)) + for _, v2 := range v { + if cr != nil { cr.sendContainerState(containerArrayElem) } + {{ encmd .Elem "v2"}} + } + if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} +} + +func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + {{ encmd .Elem "v2"}} + } + if cr != nil { cr.sendContainerState(containerMapEnd) } +} + +{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + +func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e) +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + ee.EncodeMapStart(len(v)) + {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + {{end}}if e.h.Canonical { + {{if eq .MapKey "interface{}"}}{{/* out of band + */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if cr != nil { cr.sendContainerState(containerMapKey) } + e.asis(v2[j].v) + if cr != nil { cr.sendContainerState(containerMapValue) } + e.encode(v[v2[j].i]) + } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) + var i int + for k, _ := range v { + v2[i] = {{ $x }}(k) + i++ + } + sort.Sort({{ sorttype .MapKey false}}(v2)) + for _, k2 := range v2 { + if cr != nil { cr.sendContainerState(containerMapKey) } + {{if eq .MapKey "string"}}if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} + if cr != nil { cr.sendContainerState(containerMapValue) } + {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} + } {{end}} + } else { + for k2, v2 := range v { + if cr != nil { cr.sendContainerState(containerMapKey) } + {{if eq .MapKey "string"}}if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + }{{else}}{{ encmd .MapKey "k2"}}{{end}} + if cr != nil { cr.sendContainerState(containerMapValue) } + {{ encmd .Elem "v2"}} + } + } + if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}} +} + +{{end}}{{end}}{{end}} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}:{{else}} + case map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }} + case *[]{{ .Elem }}:{{else}} + case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} + v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d) + if changed2 { + *v = v2 + } +{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} +{{/* +Slices can change if they +- did not come from an array +- are addressable (from a ptr) +- are settable (e.g. 
contained in an interface{}) +*/}} +func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { + array := f.seq == seqTypeArray + if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}} + vp := rv.Addr().Interface().(*[]{{ .Elem }}) + v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().([]{{ .Elem }}) + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) + } +} + +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) { + v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { + dd := d.d + {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []{{ .Elem }}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + if containerLenS > 0 { + x2read := containerLenS + var xtrunc bool + if containerLenS > cap(v) { + if canChange { {{/* + // fast-path is for "basic" immutable types, so no need to copy them over + // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen)) + // copy(s, v[:cap(v)]) + // v = s */}} + var xlen int + xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + if xtrunc { + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]{{ .Elem }}, xlen) + } + } else { + v = make([]{{ .Elem }}, xlen) + } + changed = true + } else { + d.arrayCannotExpand(len(v), containerLenS) + } + x2read = len(v) + } else if containerLenS != len(v) { + if canChange { + v = v[:containerLenS] + changed = true + } + } {{/* // all checks done. cannot go past len. */}} + j := 0 + for ; j < x2read; j++ { + slh.ElemContainerState(j) + {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} + } + if xtrunc { {{/* // means canChange=true, changed=true already. */}} + for ; j < containerLenS; j++ { + v = append(v, {{ zerocmd .Elem }}) + slh.ElemContainerState(j) + {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} + } + } else if !canChange { + for ; j < containerLenS; j++ { + slh.ElemContainerState(j) + d.swallow() + } + } + } else { + breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}} + if breakFound { + if canChange { + if v == nil { + v = []{{ .Elem }}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + if cap(v) == 0 { + v = make([]{{ .Elem }}, 1, 4) + changed = true + } + j := 0 + for ; !breakFound; j++ { + if j >= len(v) { + if canChange { + v = append(v, {{ zerocmd .Elem }}) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + } + } + slh.ElemContainerState(j) + if j < len(v) { {{/* // all checks done. cannot go past len. 
*/}} + {{ if eq .Elem "interface{}" }}d.decode(&v[j]) + {{ else }}v[j] = {{ decmd .Elem }}{{ end }} + } else { + d.swallow() + } + breakFound = dd.CheckBreak() + } + if canChange && j < len(v) { + v = v[:j] + changed = true + } + } + slh.End() + return v, changed +} + +{{end}}{{end}}{{end}} + + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} +{{/* +Maps can change if they are +- addressable (from a ptr) +- settable (e.g. contained in an interface{}) +*/}} +func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { + if rv.CanAddr() { + vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }}) + v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d) + if changed { + *vp = v + } + } else { + v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}) + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) + } +} +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) { + v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool, + d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { + dd := d.d + cr := d.cr + {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}} + if checkNil && dd.TryDecodeAsNil() { + if v != nil { + changed = true + } + return nil, changed + } + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) + v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) + changed = true + } + {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}} + var mk {{ .MapKey }} + var mv {{ .Elem }} + if containerLen > 0 { + for j := 0; j < containerLen; j++ { + if cr != nil { cr.sendContainerState(containerMapKey) } + {{ if eq .MapKey "interface{}" }}mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} + }{{ else }}mk = {{ decmd .MapKey }}{{ end }} + if cr != nil { cr.sendContainerState(containerMapValue) } + {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } + d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} + if v != nil { + v[mk] = mv + } + } + } else if containerLen < 0 { + for j := 0; !dd.CheckBreak(); j++ { + if cr != nil { cr.sendContainerState(containerMapKey) } + {{ if eq .MapKey "interface{}" }}mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. 
*/}} + }{{ else }}mk = {{ decmd .MapKey }}{{ end }} + if cr != nil { cr.sendContainerState(containerMapValue) } + {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } + d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} + if v != nil { + v[mk] = mv + } + } + } + if cr != nil { cr.sendContainerState(containerMapEnd) } + return v, changed +} + +{{end}}{{end}}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go new file mode 100644 index 0000000..63e5911 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go @@ -0,0 +1,34 @@ +// +build notfastpath + +package codec + +import "reflect" + +const fastpathEnabled = false + +// The generated fast-path code is very large, and adds a few seconds to the build time. +// This causes test execution, execution of small tools which use codec, etc +// to take a long time. +// +// To mitigate, we now support the notfastpath tag. +// This tag disables fastpath during build, allowing for faster build, test execution, +// short-program runs, etc. + +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*encFnInfo, reflect.Value) + decfn func(*decFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +var fastpathAV fastpathA +var fastpathTV fastpathT diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl new file mode 100644 index 0000000..32df541 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl @@ -0,0 +1,104 @@ +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{end}} {{if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else if {{var "l"}} > 0 { + {{if isChan }}if {{var "v"}} == nil { + {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) + {{var "c"}} = true + } + for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { + {{var "h"}}.ElemContainerState({{var "r"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + } + {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} + var {{var "rt"}} bool {{/* truncated */}} + _, _ = {{var "rl"}}, {{var "rt"}} + {{var "rr"}} = {{var "l"}} // len({{var "v"}}) + if {{var "l"}} > cap({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) + {{ else }}{{if not .Immutable }} + {{var "rg"}} := len({{var "v"}}) > 0 + {{var "v2"}} := {{var "v"}} {{end}} + {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, 
z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rt"}} { + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} + if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} + } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } {{end}} {{/* end isSlice:47 */}} + {{var "j"}} := 0 + for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + z.DecSwallow() + } + {{ else }}if {{var "rt"}} { + for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "v"}} = append({{var "v"}}, {{ zero}}) + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + } {{end}} {{/* end isArray:56 */}} + {{end}} {{/* end isChan:16 */}} +} else { {{/* len < 0 */}} + {{var "j"}} := 0 + for ; !r.CheckBreak(); {{var "j"}}++ { + {{if isChan }} + {{var "h"}}.ElemContainerState({{var "j"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + {{ else }} + if {{var "j"}} >= len({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) + {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} + {{var "c"}} = true {{end}} + } + {{var "h"}}.ElemContainerState({{var "j"}}) + if {{var "j"}} < len({{var "v"}}) { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } else { + z.DecSwallow() + } + {{end}} + } + {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + }{{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl new file mode 100644 index 0000000..77400e0 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl @@ -0,0 +1,58 @@ +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} > 0 { +for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // 
special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} else if {{var "l"}} < 0 { +for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true {{ end }} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go new file mode 100644 index 0000000..eb0bdad --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -0,0 +1,243 @@ +/* // +build ignore */ + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl +// ************************************************************ + +package codec + +import ( + "encoding" + "reflect" +) + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { + return genHelperEncoder{e: e}, e.e +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
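(For orientation only, a minimal sketch of how codecgen-generated methods are expected to obtain and use these helpers; MyType and the codecSelfer1234 suffix are illustrative placeholders, codec1978 is the import alias that gen.go emits for this package, and z/r are the genHelperEncoder and encDriver values returned by GenHelperEncoder above.)

func (x *MyType) CodecEncodeSelf(e *codec1978.Encoder) {
	var h codecSelfer1234                 // per-file helper type emitted by codecgen
	z, r := codec1978.GenHelperEncoder(e) // z: genHelperEncoder, r: encDriver
	_, _, _ = h, z, r
	// ... statically generated encoding follows, calling methods on z and r ...
}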
+func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { + return genHelperDecoder{d: d}, d.d +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + f.e.encodeI(iv, false, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.raw(iv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) TimeRtidIfBinc() uintptr { + if _, ok := f.e.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v) + if rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.e.h.getExt(rtid); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncSendContainerState(c containerState) { + if f.e.cr != nil { + f.e.cr.sendContainerState(c) + } +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { + f.d.swallow() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + f.d.decodeI(iv, chkPtr, false, false, false) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeBytes(f.d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.raw() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) TimeRtidIfBinc() uintptr { + if _, ok := f.d.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v).Elem() + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.d.h.getExt(rtid); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSendContainerState(c containerState) { + if f.d.cr != nil { + f.d.cr.sendContainerState(c) + } +} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl new file mode 100644 index 0000000..ad99f66 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl @@ -0,0 +1,372 @@ +/* // +build ignore */ + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. 
+// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl +// ************************************************************ + +package codec + +import ( + "encoding" + "reflect" +) + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. +// +// To help enforce this, we create an unexported type with exported members. +// The only way to get the type is via the one exported type that we control (somewhat). +// +// When static codecs are created for types, they will use this value +// to perform encoding or decoding of primitives or known slice or map types. + +// GenHelperEncoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { + return genHelperEncoder{e:e}, e.e +} + +// GenHelperDecoder is exported so that it can be used externally by codecgen. +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { + return genHelperDecoder{d:d}, d.d +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperEncoder struct { + e *Encoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +type genHelperDecoder struct { + d *Decoder + F fastpathT +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBasicHandle() *BasicHandle { + return f.e.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinary() bool { + return f.e.be // f.e.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncFallback(iv interface{}) { + // println(">>>>>>>>> EncFallback") + f.e.encodeI(iv, false, false) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshal(bs, fnerr, false, c_UTF8) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshal(bs, fnerr, true, c_UTF8) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.raw(iv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) TimeRtidIfBinc() uintptr { + if _, ok := f.e.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v) + if rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.e.h.getExt(rtid); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncSendContainerState(c containerState) { + if f.e.cr != nil { + f.e.cr.sendContainerState(c) + } +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { + f.d.swallow() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + f.d.decodeI(iv, chkPtr, false, false, false) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeBytes(f.d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.raw() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) TimeRtidIfBinc() uintptr { + if _, ok := f.d.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v).Elem() + rtid := reflect.ValueOf(rt).Pointer() + if xfFn := f.d.h.getExt(rtid); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { + return decInferLen(clen, maxlen, unit) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSendContainerState(c containerState) { + if f.d.cr != nil { + f.d.cr.sendContainerState(c) + } +} + +{{/* + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncDriver() encDriver { + return f.e.e +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecDriver() decDriver { + return f.d.d +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncNil() { + f.e.e.EncodeNil() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBytes(v []byte) { + f.e.e.EncodeStringBytes(c_RAW, v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncArrayStart(length int) { + f.e.e.EncodeArrayStart(length) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncArrayEnd() { + f.e.e.EncodeArrayEnd() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncArrayEntrySeparator() { + f.e.e.EncodeArrayEntrySeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncMapStart(length int) { + f.e.e.EncodeMapStart(length) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncMapEnd() { + f.e.e.EncodeMapEnd() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncMapEntrySeparator() { + f.e.e.EncodeMapEntrySeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncMapKVSeparator() { + f.e.e.EncodeMapKVSeparator() +} + +// --------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBytes(v *[]byte) { + *v = f.d.d.DecodeBytes(*v) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTryNil() bool { + return f.d.d.TryDecodeAsNil() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecContainerIsNil() (b bool) { + return f.d.d.IsContainerType(valueTypeNil) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecContainerIsMap() (b bool) { + return f.d.d.IsContainerType(valueTypeMap) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecContainerIsArray() (b bool) { + return f.d.d.IsContainerType(valueTypeArray) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecCheckBreak() bool { + return f.d.d.CheckBreak() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecMapStart() int { + return f.d.d.ReadMapStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayStart() int { + return f.d.d.ReadArrayStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecMapEnd() { + f.d.d.ReadMapEnd() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayEnd() { + f.d.d.ReadArrayEnd() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayEntrySeparator() { + f.d.d.ReadArrayEntrySeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecMapEntrySeparator() { + f.d.d.ReadMapEntrySeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecMapKVSeparator() { + f.d.d.ReadMapKVSeparator() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte { + return f.d.d.DecodeStringAsBytes(bs) +} + + +// -- encode calls (primitives) +{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) { + ee := f.e.e + {{ encmd .Primitive "v" }} +} +{{ end }}{{ end }}{{ end }} + +// -- decode calls (primitives) +{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) { + dd := f.d.d + *vp = {{ decmd .Primitive }} +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) { + dd := f.d.d + v = {{ decmd .Primitive }} + return +} +{{ end }}{{ end }}{{ end }} + + +// -- encode calls (slices/maps) +{{range .Values}}{{if not .Primitive }}{{if .Slice }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}} + f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e) +} +{{ end }}{{ end }} + +// -- decode calls (slices/maps) +{{range .Values}}{{if not .Primitive }} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) { +{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}} + v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d) + if changed { + *vp = v + } +} +{{ end }}{{ end }} +*/}} diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go new file mode 100644 index 0000000..2ace97b --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -0,0 +1,175 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} > 0 { +for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} else if {{var "l"}} < 0 { +for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { + z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true {{ end }} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} + if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{end}} {{if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else if {{var "l"}} > 0 { + {{if isChan }}if {{var "v"}} == nil { + {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) + {{var "c"}} = true + } + for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { + {{var "h"}}.ElemContainerState({{var "r"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + } + {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} + var {{var "rt"}} bool {{/* truncated */}} + _, _ = {{var "rl"}}, {{var "rt"}} + {{var "rr"}} = {{var "l"}} // len({{var "v"}}) + if {{var "l"}} > cap({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) + {{ else }}{{if not .Immutable }} + {{var "rg"}} := len({{var "v"}}) > 0 + {{var "v2"}} := {{var "v"}} {{end}} + {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rt"}} { + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} + if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} + } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } {{end}} {{/* end isSlice:47 */}} + {{var "j"}} := 0 + for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "h"}}.ElemContainerState({{var "j"}}) + z.DecSwallow() + } + {{ else }}if {{var "rt"}} { + for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + {{var "v"}} = append({{var "v"}}, {{ zero}}) + {{var "h"}}.ElemContainerState({{var "j"}}) + {{ $x := printf 
"%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + } {{end}} {{/* end isArray:56 */}} + {{end}} {{/* end isChan:16 */}} +} else { {{/* len < 0 */}} + {{var "j"}} := 0 + for ; !r.CheckBreak(); {{var "j"}}++ { + {{if isChan }} + {{var "h"}}.ElemContainerState({{var "j"}}) + var {{var "t"}} {{ .Typ }} + {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} + {{var "v"}} <- {{var "t"}} + {{ else }} + if {{var "j"}} >= len({{var "v"}}) { + {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) + {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} + {{var "c"}} = true {{end}} + } + {{var "h"}}.ElemContainerState({{var "j"}}) + if {{var "j"}} < len({{var "v"}}) { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } else { + z.DecSwallow() + } + {{end}} + } + {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + }{{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} +` + diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go new file mode 100644 index 0000000..da66921 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -0,0 +1,2020 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "go/format" + "io" + "io/ioutil" + "math/rand" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/template" + "time" + "unicode" + "unicode/utf8" +) + +// --------------------------------------------------- +// codecgen supports the full cycle of reflection-based codec: +// - RawExt +// - Raw +// - Builtins +// - Extensions +// - (Binary|Text|JSON)(Unm|M)arshal +// - generic by-kind +// +// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. +// In those areas, we try to only do reflection or interface-conversion when NECESSARY: +// - Extensions, only if Extensions are configured. +// +// However, codecgen doesn't support the following: +// - Canonical option. (codecgen IGNORES it currently) +// This is just because it has not been implemented. +// +// During encode/decode, Selfer takes precedence. +// A type implementing Selfer will know how to encode/decode itself statically. +// +// The following field types are supported: +// array: [n]T +// slice: []T +// map: map[K]V +// primitive: [u]int[n], float(32|64), bool, string +// struct +// +// --------------------------------------------------- +// Note that a Selfer cannot call (e|d).(En|De)code on itself, +// as this will cause a circular reference, as (En|De)code will call Selfer methods. +// Any type that implements Selfer must implement completely and not fallback to (En|De)code. +// +// In addition, code in this file manages the generation of fast-path implementations of +// encode/decode of slices/maps of primitive keys/values. +// +// Users MUST re-generate their implementations whenever the code shape changes. +// The generated code will panic if it was generated with a version older than the supporting library. +// --------------------------------------------------- +// +// codec framework is very feature rich. 
+// When encoding or decoding into an interface, it depends on the runtime type of the interface. +// The type of the interface may be a named type, an extension, etc. +// Consequently, we fallback to runtime codec for encoding/decoding interfaces. +// In addition, we fallback for any value which cannot be guaranteed at runtime. +// This allows us support ANY value, including any named types, specifically those which +// do not implement our interfaces (e.g. Selfer). +// +// This explains some slowness compared to other code generation codecs (e.g. msgp). +// This reduction in speed is only seen when your refers to interfaces, +// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } +// +// codecgen will panic if the file was generated with an old version of the library in use. +// +// Note: +// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. +// This way, there isn't a function call overhead just to see that we should not enter a block of code. + +// GenVersion is the current version of codecgen. +// +// NOTE: Increment this value each time codecgen changes fundamentally. +// Fundamental changes are: +// - helper methods change (signature change, new ones added, some removed, etc) +// - codecgen command line changes +// +// v1: Initial Version +// v2: +// v3: Changes for Kubernetes: +// changes in signature of some unpublished helper methods and codecgen cmdline arguments. +// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) +// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. +const GenVersion = 5 + +const ( + genCodecPkg = "codec1978" + genTempVarPfx = "yy" + genTopLevelVarName = "x" + + // ignore canBeNil parameter, and always set to true. + // This is because nil can appear anywhere, so we should always check. + genAnythingCanBeNil = true + + // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; + // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals + // are not executed a lot. + // + // From testing, it didn't make much difference in runtime, so keep as true (one function only) + genUseOneFunctionForDecStructMap = true +) + +type genStructMapStyle uint8 + +const ( + genStructMapStyleConsolidated genStructMapStyle = iota + genStructMapStyleLenPrefix + genStructMapStyleCheckBreak +) + +var ( + genAllTypesSamePkgErr = errors.New("All types must be in the same package") + genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") + genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) + genCheckVendor bool +) + +// genRunner holds some state used during a Gen run. 
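(For orientation only, a hedged reconstruction of the version guard that Gen writes into every generated file, pieced together from the init() emission further down; codec1978 is the genCodecPkg alias, 5 is the GenVersion in this copy, and the generated file is assumed to import "fmt" and "runtime".)

func init() {
	if codec1978.GenVersion != 5 {
		_, file, _, _ := runtime.Caller(0)
		err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
			5, codec1978.GenVersion, file)
		panic(err)
	}
	// the real generated init also references imported types inside an "if false" block
}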
+type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + unsafe bool // is unsafe to be used in generated code? + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* +func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + unsafe: useUnsafe, + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(genAllTypesSamePkgErr) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." 
+ x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k, _ := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + x.linef("%s \"%s\"", x.imn[k], k) + } + // add required packages + for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} { + if _, ok := x.im[k]; !ok { + if k == "unsafe" && !x.unsafe { + continue + } + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) + x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) + x.linef("// ----- value types used ----") + x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) + x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) + x.linef("// ----- containerStateValues ----") + x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) + x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) + x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) + x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) + x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) + x.line(")") + x.line("var (") + x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") + x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + if x.unsafe { + x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}") + x.line("") + } + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) + x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx) + x.line("panic(err)") + x.linef("}") + x.line("if false { // reference the types, but skip this branch at build/run time") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if x.unsafe { + x.linef("var v%v unsafe.Pointer", n) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := reflect.ValueOf(t).Pointer() + // generate enc functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. 
+ x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. + return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + if _, err := io.WriteString(x.w, s); err != nil { + panic(err) + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.line(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) outf(s string, params ...interface{}) { + x.out(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. 
+ // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." + t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Struct + x.varsfxreset() + fnSigPfx := "func (x " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + + x.out(fnSigPfx) + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + // x.enc(genTopLevelVarName, t) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. 
+ // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) + } +} + +// encVar will encode a variable. 
+// The parameter, t, is the reflect.Type of the variable itself +func (x *genRunner) encVar(varname string, t reflect.Type) { + // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) + var checkNil bool + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: + checkNil = true + } + if checkNil { + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + } + switch t.Kind() { + case reflect.Ptr: + switch t.Elem().Kind() { + case reflect.Struct, reflect.Array: + x.enc(varname, genNonPtr(t)) + default: + i := x.varsfx() + x.line(genTempVarPfx + i + " := *" + varname) + x.enc(genTempVarPfx+i, genNonPtr(t)) + } + case reflect.Struct, reflect.Array: + i := x.varsfx() + x.line(genTempVarPfx + i + " := &" + varname) + x.enc(genTempVarPfx+i, t) + default: + x.enc(varname, t) + } + + if checkNil { + x.line("}") + } + +} + +// enc will encode a variable (varname) of type t, +// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) +func (x *genRunner) enc(varname string, t reflect.Type) { + rtid := reflect.ValueOf(t).Pointer() + // We call CodecEncodeSelf if one of the following are honored: + // - the type already implements Selfer, call that + // - the type has a Selfer implementation just created, use that + // - the type is in the list of the ones we will generate for, but it is not currently being generated + + mi := x.varsfx() + tptr := reflect.PtrTo(t) + tk := t.Kind() + if x.checkForSelfer(t, varname) { + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } + } + + if _, ok := x.te[rtid]; ok { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.te[rtid] = true + rtidAdded = true + } + + // check if + // - type is RawExt, Raw + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) + x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == rawTyp { + x.linef("} else { z.EncRaw(%v)", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.EncodeRawExt(%v, e)", varname) + return + } + // HACK: Support for Builtins. + // Currently, only Binc supports builtins, and the only builtin type is time.Time. + // Have a method that returns the rtid for time.Time if Handle is Binc. + if t == timeTyp { + vrtid := genTempVarPfx + "m" + x.varsfx() + x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) + x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) + } + // only check for extensions if the type is named, and has a packagePath. 
+ if genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configued, before doing the interface conversion + x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) + } + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } + } else { // varname is of type T + if t.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname) + } + } + x.line("} else {") + + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(int64(" + varname + "))") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(uint64(" + varname + "))") + case reflect.Float32: + x.line("r.EncodeFloat32(float32(" + varname + "))") + case reflect.Float64: + x.line("r.EncodeFloat64(float64(" + varname + "))") + case reflect.Bool: + x.line("r.EncodeBool(bool(" + varname + "))") + case reflect.String: + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))") + case reflect.Chan: + x.xtraSM(varname, true, t) + // x.encListFallback(varname, rtid, t) + case reflect.Array: + x.xtraSM(varname, true, t) + case reflect.Slice: + // if nil, call dedicated function + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") + } else { + x.xtraSM(varname, true, t) + // x.encListFallback(varname, rtid, t) + } + case reflect.Map: + // if nil, call dedicated function + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." 
+ g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") + } else { + x.xtraSM(varname, true, t) + // x.encMapFallback(varname, rtid, t) + } + case reflect.Struct: + if !inlist { + delete(x.te, rtid) + x.line("z.EncFallback(" + varname + ")") + break + } + x.encStruct(varname, rtid, t) + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.EncFallback(" + varname + ")") + } +} + +func (x *genRunner) encZero(t reflect.Type) { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(0)") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(0)") + case reflect.Float32: + x.line("r.EncodeFloat32(0)") + case reflect.Float64: + x.line("r.EncodeFloat64(0)") + case reflect.Bool: + x.line("r.EncodeBool(false)") + case reflect.String: + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`) + default: + x.line("r.EncodeNil()") + } +} + +func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { + // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) + // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it + + // if t === type currently running selfer on, do for all + ti := x.ti.get(rtid, t) + i := x.varsfx() + sepVarname := genTempVarPfx + "sep" + i + numfieldsvar := genTempVarPfx + "q" + i + ti2arrayvar := genTempVarPfx + "r" + i + struct2arrvar := genTempVarPfx + "2arr" + i + + x.line(sepVarname + " := !z.EncBinary()") + x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. + x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) + x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar) + x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) + nn := 0 + for j, si := range tisfi { + if !si.omitEmpty { + nn++ + continue + } + var t2 reflect.StructField + var omitline string + if si.i != -1 { + t2 = t.Field(int(si.i)) + } else { + t2typ := t + varname3 := varname + for _, ix := range si.is { + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + omitline += varname3 + " != nil && " + } + } + } + // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + switch t2.Type.Kind() { + case reflect.Struct: + omitline += " true" + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + omitline += "len(" + varname + "." + t2.Name + ") != 0" + default: + omitline += varname + "." 
+ t2.Name + " != " + x.genZeroValueR(t2.Type) + } + x.linef("%s[%v] = %s", numfieldsvar, j, omitline) + } + x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") + x.linef("} else {") // if not ti.toArray + x.linef("%snn%s = %v", genTempVarPfx, i, nn) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + if si.i != -1 { + t2 = t.Field(int(si.i)) + } else { + t2typ := t + varname3 := varname + for _, ix := range si.is { + // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + } + x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + if si.omitEmpty { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") + x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else {") + x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname) + return + } + i := x.varsfx() + g := genTempVarPfx + x.line("r.EncodeArrayStart(len(" + varname + "))") + if t.Kind() == reflect.Chan { + x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, 
g, i, g, i, g, i) + x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef("%sv%s := <-%s", g, i, varname) + } else { + // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + } + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + // TODO: expand this to handle canonical. + i := x.varsfx() + x.line("r.EncodeMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") + x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) +} + +func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + i := x.varsfx() + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + if canBeNil { + x.line("if r.TryDecodeAsNil() {") + if t.Kind() == reflect.Ptr { + x.line("if " + varname + " != nil { ") + + // if varname is a field of a struct (has a dot in it), + // then just set it to nil + if strings.IndexByte(varname, '.') != -1 { + x.line(varname + " = nil") + } else { + x.line("*" + varname + " = " + x.genZeroValueR(t.Elem())) + } + x.line("}") + } else { + x.line(varname + " = " + x.genZeroValueR(t)) + } + x.line("} else {") + } else { + x.line("// cannot be nil") + } + if t.Kind() != reflect.Ptr { + if x.decTryAssignPrimitive(varname, t) { + x.line(genTempVarPfx + "v" + i + " := &" + varname) + x.dec(genTempVarPfx+"v"+i, t) + } + } else { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. + var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + // if varname has [ in it, then create temp variable for this ptr thingie + if strings.Index(varname, "[") >= 0 { + varname2 := genTempVarPfx + "w" + i + x.line(varname2 + " := " + varname) + varname = varname2 + } + + if ptrPfx == "" { + x.dec(varname, t) + } else { + x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) + x.dec(genTempVarPfx+"z"+i, t) + } + + } + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type ptrTo(t). +// t is always a basetype (i.e. not of kind reflect.Ptr). 
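
Aside, not part of the vendored diff: the nil handling that decVar generates mirrors what callers of the library observe at runtime, namely that a nil in the stream resets a pointer field while a concrete value allocates one if needed. The wrapper type below is invented, and the JSON handle is used only for illustration.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type wrapper struct {
	N *int `codec:"n"`
}

func main() {
	var h codec.JsonHandle
	var w wrapper
	// A concrete value allocates the pointer before decoding into it.
	codec.NewDecoderBytes([]byte(`{"n": 7}`), &h).MustDecode(&w)
	fmt.Println(*w.N) // 7
	// A nil in the stream resets the pointer to its zero value.
	codec.NewDecoderBytes([]byte(`{"n": null}`), &h).MustDecode(&w)
	fmt.Println(w.N == nil) // true
}
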
+func (x *genRunner) dec(varname string, t reflect.Type) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := reflect.ValueOf(t).Pointer() + tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + mi := x.varsfx() + x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == rawTyp { + x.linef("} else { *%v = z.DecRaw()", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) + return + } + + // HACK: Support for Builtins. + // Currently, only Binc supports builtins, and the only builtin type is time.Time. + // Have a method that returns the rtid for time.Time if Handle is Binc. + if t == timeTyp { + vrtid := genTempVarPfx + "m" + x.varsfx() + x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) + x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) + } + // only check for extensions if the type is named, and has a packagePath. + if genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configued, before doing the interface conversion + x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) + } + + if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { + x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname) + } else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { + x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname) + } + + x.line("} else {") + + // Since these are pointers, we cannot share, and have to use them one by one + switch t.Kind() { + case reflect.Int: + x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))") + // x.line("z.DecInt((*int)(" + varname + "))") + case reflect.Int8: + x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))") + // x.line("z.DecInt8((*int8)(" + varname + "))") + case reflect.Int16: + x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))") + // x.line("z.DecInt16((*int16)(" + varname + "))") + case reflect.Int32: + x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))") + // x.line("z.DecInt32((*int32)(" + varname + "))") + case reflect.Int64: + x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))") + // x.line("z.DecInt64((*int64)(" + varname + "))") + + case reflect.Uint: + x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))") + // x.line("z.DecUint((*uint)(" + varname + "))") + case reflect.Uint8: + x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))") + // 
x.line("z.DecUint8((*uint8)(" + varname + "))") + case reflect.Uint16: + x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))") + //x.line("z.DecUint16((*uint16)(" + varname + "))") + case reflect.Uint32: + x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))") + //x.line("z.DecUint32((*uint32)(" + varname + "))") + case reflect.Uint64: + x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))") + //x.line("z.DecUint64((*uint64)(" + varname + "))") + case reflect.Uintptr: + x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))") + + case reflect.Float32: + x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))") + //x.line("z.DecFloat32((*float32)(" + varname + "))") + case reflect.Float64: + x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))") + // x.line("z.DecFloat64((*float64)(" + varname + "))") + + case reflect.Bool: + x.line("*((*bool)(" + varname + ")) = r.DecodeBool()") + // x.line("z.DecBool((*bool)(" + varname + "))") + case reflect.String: + x.line("*((*string)(" + varname + ")) = r.DecodeString()") + // x.line("z.DecString((*string)(" + varname + "))") + case reflect.Array, reflect.Chan: + x.xtraSM(varname, false, t) + // x.decListFallback(varname, rtid, true, t) + case reflect.Slice: + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") + } else { + x.xtraSM(varname, false, t) + // x.decListFallback(varname, rtid, false, t) + } + case reflect.Map: + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") + } else { + x.xtraSM(varname, false, t) + // x.decMapFallback(varname, rtid, t) + } + case reflect.Struct: + if inlist { + x.decStruct(varname, rtid, t) + } else { + // delete(x.td, rtid) + x.line("z.DecFallback(" + varname + ", false)") + } + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.DecFallback(" + varname + ", true)") + } +} + +func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { + // This should only be used for exact primitives (ie un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...) 
+ + if t.Name() != "" { + tryAsPtr = true + return + } + + switch t.Kind() { + case reflect.Int: + x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) + case reflect.Int8: + x.linef("%s = r.DecodeInt(8)", varname) + case reflect.Int16: + x.linef("%s = r.DecodeInt(16)", varname) + case reflect.Int32: + x.linef("%s = r.DecodeInt(32)", varname) + case reflect.Int64: + x.linef("%s = r.DecodeInt(64)", varname) + + case reflect.Uint: + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) + case reflect.Uint8: + x.linef("%s = r.DecodeUint(8)", varname) + case reflect.Uint16: + x.linef("%s = r.DecodeUint(16)", varname) + case reflect.Uint32: + x.linef("%s = r.DecodeUint(32)", varname) + case reflect.Uint64: + x.linef("%s = r.DecodeUint(64)", varname) + case reflect.Uintptr: + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) + + case reflect.Float32: + x.linef("%s = r.DecodeFloat(true)", varname) + case reflect.Float64: + x.linef("%s = r.DecodeFloat(false)", varname) + + case reflect.Bool: + x.linef("%s = r.DecodeBool()", varname) + case reflect.String: + x.linef("%s = r.DecodeString()", varname) + default: + tryAsPtr = true + } + return +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, telem, false) + return "" + } + funcs["decLine"] = func(pfx string) string { + x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) + funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, tkey, false) + return "" + } + 
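
Aside, not part of the vendored diff: a pattern worth noting in decMapFallback (and decListFallback above) is that each template.FuncMap callback emits generated code to the output writer as a side effect and returns an empty string, so the template text stays purely structural. The sketch below uses only the standard text/template package; all names in it are invented.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	var out bytes.Buffer
	// The callback writes a line of "generated code" and returns "", so the
	// template action itself contributes nothing to the rendered output.
	emit := func(line string) string {
		fmt.Fprintln(&out, line)
		return ""
	}
	tmpl := template.Must(template.New("").
		Funcs(template.FuncMap{"decLine": emit}).
		Parse(`{{decLine "x := r.DecodeInt(64)"}}`))
	if err := tmpl.Execute(&out, nil); err != nil {
		panic(err)
	}
	fmt.Print(out.String()) // x := r.DecodeInt(64)
}
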
funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, telem, false) + return "" + } + funcs["decLineK"] = func(pfx string) string { + x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) + return "" + } + funcs["decLine"] = func(pfx string) string { + x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + var t2 reflect.StructField + if si.i != -1 { + t2 = t.Field(int(si.i)) + } else { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for _, ix := range si.is { + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + x.decVar(varname+"."+t2.Name, t2.Type, false) + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + i := x.varsfx() + kName := tpfx + "s" + i + + // We thought to use ReadStringAsBytes, as go compiler might optimize the copy out. + // However, using that was more expensive, as it seems that the switch expression + // is evaluated each time. + // + // We could depend on decodeString using a temporary/shared buffer internally. + // However, this model of creating a byte array, and using explicitly is faster, + // and allows optional use of unsafe []byte->string conversion without alloc. + + // Also, ensure that the slice array doesn't escape. + // That will help escape analysis prevent allocation when it gets better. + + // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") + // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") + // use the scratch buffer to avoid allocation (most field names are < 32). + + x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into") + + x.line("_ = " + kName + "Slc") + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. 
+ x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)") + // let string be scoped to this loop alone, so it doesn't escape. + if x.unsafe { + x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" + + kName + "Slc[0])), len(" + kName + "Slc)}") + x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))") + } else { + x.line(kName + " := string(" + kName + "Slc)") + } + x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + x.linef("var %sj%s int", tpfx, i) + x.linef("var %sb%s bool", tpfx, i) // break + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + for _, si := range tisfi { + var t2 reflect.StructField + if si.i != -1 { + t2 = t.Field(int(si.i)) + } else { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for _, ix := range si.is { + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(ix) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", + tpfx, i, x.xs, breakString) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.decVar(varname+"."+t2.Name, t2.Type, true) + } + // read remaining values and throw away. 
+ x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // if container is map + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else { ") + x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")") + x.line("} ") +} + +// -------- + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) + } else { + name = append(name, "Map"...) + name = append(name, genTitleCaseName(x.MapKey)...) + } + name = append(name, genTitleCaseName(x.Elem)...) + } + return string(name) + +} + +// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. +// +// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, +// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped. +// We strip it here. +func genImportPath(t reflect.Type) (s string) { + s = t.PkgPath() + if genCheckVendor { + // HACK: always handle vendoring. 
It should be typically on in go 1.6, 1.7 + s = stripVendor(s) + } + return +} + +// A go identifier is (letter|_)[letter|number|_]* +func genGoIdentifier(s string, checkFirstChar bool) string { + b := make([]byte, 0, len(s)) + t := make([]byte, 4) + var n int + for i, r := range s { + if checkFirstChar && i == 0 && !unicode.IsLetter(r) { + b = append(b, '_') + } + // r must be unicode_letter, unicode_digit or _ + if unicode.IsLetter(r) || unicode.IsDigit(r) { + n = utf8.EncodeRune(t, r) + b = append(b, t[:n]...) + } else { + b = append(b, '_') + } + } + return string(b) +} + +func genNonPtr(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func genTitleCaseName(s string) string { + switch s { + case "interface{}", "interface {}": + return "Intf" + default: + return strings.ToUpper(s[0:1]) + s[1:] + } +} + +func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "Ptrto" + t = t.Elem() + } + tstr := t.String() + if tn := t.Name(); tn != "" { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + return ptrPfx + tn + } else { + if genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) + case reflect.Slice: + return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) + case reflect.Array: + return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) + case reflect.Chan: + var cx string + switch t.ChanDir() { + case reflect.SendDir: + cx = "ChanSend" + case reflect.RecvDir: + cx = "ChanRecv" + default: + cx = "Chan" + } + return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) + default: + if t == intfTyp { + return ptrPfx + "Interface" + } else { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + if t.Name() != "" { + return ptrPfx + t.Name() + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } else { + // best way to get the package name inclusive + // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) + if t.Name() != "" && genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + } +} + +// genCustomNameForType base64encodes the t.String() value in such a way +// that it can be used within a function name. 
+func genCustomTypeName(tstr string) string { + len2 := genBase64enc.EncodedLen(len(tstr)) + bufx := make([]byte, len2) + genBase64enc.Encode(bufx, []byte(tstr)) + for i := len2 - 1; i >= 0; i-- { + if bufx[i] == '=' { + len2-- + } else { + break + } + } + return string(bufx[:len2]) +} + +func genIsImmutable(t reflect.Type) (v bool) { + return isImmutableKind(t.Kind()) +} + +type genInternal struct { + Values []genV + Unsafe bool +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" { + l++ + } + } + return +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "ee.EncodeString(c_UTF8, " + vname + ")" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + case "symbol": + return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(dd.DecodeUint(uintBitsize))" + case "uint8": + return "uint8(dd.DecodeUint(8))" + case "uint16": + return "uint16(dd.DecodeUint(16))" + case "uint32": + return "uint32(dd.DecodeUint(32))" + case "uint64": + return "dd.DecodeUint(64)" + case "uintptr": + return "uintptr(dd.DecodeUint(uintBitsize))" + case "int": + return "int(dd.DecodeInt(intBitsize))" + case "int8": + return "int8(dd.DecodeInt(8))" + case "int16": + return "int16(dd.DecodeInt(16))" + case "int32": + return "int32(dd.DecodeInt(32))" + case "int64": + return "dd.DecodeInt(64)" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(dd.DecodeFloat(true))" + case "float64": + return "dd.DecodeFloat(false)" + case "bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + s) +} + +func stripVendor(s string) string { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
+ const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + return s +} + +// var genInternalMu sync.Mutex +var genInternalV genInternal +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. + // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt genInternal + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + } + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. +// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + gt.Unsafe = !safe + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. + // w.Write(bout) // write out if error, as much as possible, so we can still see. 
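
Aside, not part of the vendored diff: for reference, the vendor-stripping rules implemented above behave as in the standalone sketch below. stripVendorDemo is an invented restatement of the same two rules (the real helper is unexported), and the import paths are illustrative.

package main

import (
	"fmt"
	"strings"
)

// stripVendorDemo restates the rules above: keep everything after the
// last "/vendor/", otherwise drop a leading "vendor/" prefix.
func stripVendorDemo(s string) string {
	if i := strings.LastIndex(s, "/vendor/"); i >= 0 {
		return s[i+len("/vendor/"):]
	}
	return strings.TrimPrefix(s, "vendor/")
}

func main() {
	fmt.Println(stripVendorDemo("example.com/app/vendor/github.com/ugorji/go/codec"))
	// github.com/ugorji/go/codec
	fmt.Println(stripVendorDemo("vendor/github.com/ugorji/go/codec"))
	// github.com/ugorji/go/codec
}
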
+ return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/ugorji/go/codec/gen_15.go b/vendor/github.com/ugorji/go/codec/gen_15.go new file mode 100644 index 0000000..ab76c31 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_15.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +func init() { + genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" +} diff --git a/vendor/github.com/ugorji/go/codec/gen_16.go b/vendor/github.com/ugorji/go/codec/gen_16.go new file mode 100644 index 0000000..87c04e2 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_16.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.6 + +package codec + +import "os" + +func init() { + genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" +} diff --git a/vendor/github.com/ugorji/go/codec/gen_17.go b/vendor/github.com/ugorji/go/codec/gen_17.go new file mode 100644 index 0000000..3881a43 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_17.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.7 + +package codec + +func init() { + genCheckVendor = true +} diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go new file mode 100644 index 0000000..8b94fc1 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper.go @@ -0,0 +1,1314 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. + +// Some shared ideas around encoding/decoding +// ------------------------------------------ +// +// If an interface{} is passed, we first do a type assertion to see if it is +// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. +// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. +// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. 
If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. +// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. +// +// ------------------------------------ +// On encode, user can specify omitEmpty. This means that the value will be omitted +// if the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. This means that if decoding into a struct with an +// int field with current value=5, and the field is omitted in the stream, then after +// decoding, the value will still be 5 (not 0). +// omitEmpty only works if you guarantee that you always decode into zero-values. +// +// ------------------------------------ +// We could have truncated a map to remove keys not available in the stream, +// or set values in the struct which are not in the stream to their zero values. +// We decided against it because there is no efficient way to do it. +// We may introduce it as an option later. +// However, that will require enabling it for both runtime and code generation modes. +// +// To support truncate, we need to do 2 passes over the container: +// map +// - first collect all keys (e.g. in k1) +// - for each key in stream, mark k1 that the key should not be removed +// - after updating map, do second pass and call delete for all keys in k1 which are not marked +// struct: +// - for each field, track the *typeInfo s1 +// - iterate through all s1, and for each one not marked, set value to zero +// - this involves checking the possible anonymous fields which are nil ptrs. +// too much work. +// +// ------------------------------------------ +// Error Handling is done within the library using panic. +// +// This way, the code doesn't have to keep checking if an error has happened, +// and we don't have to keep sending the error value along with each call +// or storing it in the En|Decoder and checking it constantly along the way. +// +// The disadvantage is that small functions which use panics cannot be inlined. +// The code accounts for that by only using panics behind an interface; +// since interface calls cannot be inlined, this is irrelevant. +// +// We considered storing the error is En|Decoder. +// - once it has its err field set, it cannot be used again. +// - panicing will be optional, controlled by const flag. +// - code should always check error first and return early. +// We eventually decided against it as it makes the code clumsier to always +// check for these error conditions. + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +const ( + scratchByteArrayLen = 32 + initCollectionCap = 32 // 32 is defensive. 16 is preferred. + + // Support encoding.(Binary|Text)(Unm|M)arshaler. + // This constant flag will enable or disable it. 
+ supportMarshalInterfaces = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + // + // From benchmarks, slices with linear search perform better with < 32 entries. + // We have typically seen a high threshold of about 24 entries. + useMapForCodecCache = false + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true + + // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first. + // Only concern is that, if the slice already contained some garbage, we will decode into that garbage. + // The chances of this are slim, so leave this "optimization". + // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value? + resetSliceElemToZeroValue bool = false +) + +var ( + oneByteArr = [1]byte{0} + zeroByteSlice = oneByteArr[:0:0] +) + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + // valueTypeInvalid = 0xff +) + +type seqType uint8 + +const ( + _ seqType = iota + seqTypeArray + seqTypeSlice + seqTypeChan +) + +// note that containerMapStart and containerArraySend are not sent. +// This is because the ReadXXXStart and EncodeXXXStart already does these. +type containerState uint8 + +const ( + _ containerState = iota + + containerMapStart // slot left open, since Driver method already covers it + containerMapKey + containerMapValue + containerMapEnd + containerArrayStart // slot left open, since Driver methods already cover it + containerArrayElem + containerArrayEnd +) + +// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo +type sfiIdx struct { + name string + index int +} + +// do not recurse if a containing type refers to an embedded type +// which refers back to its containing type (via a pointer). +// The second time this back-reference happens, break out, +// so as not to cause an infinite loop. +const rgetMaxRecursion = 2 + +// Anecdotally, we believe most types have <= 12 fields. +// Java's PMD rules set TooManyFields threshold to 15. 
+const rgetPoolTArrayLen = 12 + +type rgetT struct { + fNames []string + encNames []string + etypes []uintptr + sfis []*structFieldInfo +} + +type rgetPoolT struct { + fNames [rgetPoolTArrayLen]string + encNames [rgetPoolTArrayLen]string + etypes [rgetPoolTArrayLen]uintptr + sfis [rgetPoolTArrayLen]*structFieldInfo + sfiidx [rgetPoolTArrayLen]sfiIdx +} + +var rgetPool = sync.Pool{ + New: func() interface{} { return new(rgetPoolT) }, +} + +type containerStateRecv interface { + sendContainerState(containerState) +} + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + rawTypId = reflect.ValueOf(rawTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = reflect.ValueOf(timeTyp).Pointer() + stringTypId = reflect.ValueOf(stringTyp).Pointer() + + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") +) + +var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) + +// Selfer defines methods by which a value can encode or decode itself. +// +// Any type which implements Selfer will be able to encode or decode itself. +// Consequently, during (en|de)code, this takes precedence over +// (text|binary)(M|Unm)arshal or extension support. +type Selfer interface { + CodecEncodeSelf(*Encoder) + CodecDecodeSelf(*Decoder) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +// This affords storing a map in a specific sequence in the stream. 
+// +// The support of MapBySlice affords the following: +// - A slice type which implements MapBySlice will be encoded as a map +// - A slice can be decoded from a map in the stream +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + // TypeInfos is used to get the type info for any type. + // + // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json + TypeInfos *TypeInfos + + extHandle + EncodeOptions + DecodeOptions +} + +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} + +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos != nil { + return x.TypeInfos.get(rtid, rt) + } + return defTypeInfos.get(rtid, rt) +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + getBasicHandle() *BasicHandle + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool +} + +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and store the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. +type Raw []byte + +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. +// +// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. +type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. + Value interface{} +} + +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + WriteExt(v interface{}) []byte + + // ReadExt updates a value from a []byte. + ReadExt(dst interface{}, src []byte) +} + +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + ConvertExt(v interface{}) interface{} + + // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. + UpdateExt(dst interface{}, src interface{}) +} + +// Ext handles custom (de)serialization of custom types / extensions. 
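
Aside, not part of the vendored diff: to make the InterfaceExt contract above concrete, here is a hedged sketch of an extension that converts a struct to a simpler string value and back. The point and pointExt names are invented, and the SetInterfaceExt registration call on JsonHandle is an assumption based on the deprecation note that follows, so treat the wiring as illustrative rather than definitive.

package main

import (
	"fmt"
	"reflect"

	"github.com/ugorji/go/codec"
)

// point is an invented named struct type (SetExt requires a named,
// non-pointer, non-interface type).
type point struct{ X, Y int }

// pointExt converts a point to/from the simpler string "X,Y", following the
// ConvertExt/UpdateExt contract documented above.
type pointExt struct{}

func (pointExt) ConvertExt(v interface{}) interface{} {
	// v may be point or *point, since point is a struct (see note above).
	switch p := v.(type) {
	case point:
		return fmt.Sprintf("%d,%d", p.X, p.Y)
	case *point:
		return fmt.Sprintf("%d,%d", p.X, p.Y)
	default:
		panic(fmt.Sprintf("unexpected type %T", v))
	}
}

func (pointExt) UpdateExt(dst interface{}, src interface{}) {
	p := dst.(*point)
	if _, err := fmt.Sscanf(src.(string), "%d,%d", &p.X, &p.Y); err != nil {
		panic(err)
	}
}

func main() {
	var h codec.JsonHandle
	// Assumed registration entry point, per the deprecation note below; 1 is an arbitrary tag.
	if err := h.SetInterfaceExt(reflect.TypeOf(point{}), 1, pointExt{}); err != nil {
		panic(err)
	}
	var buf []byte
	codec.NewEncoderBytes(&buf, &h).MustEncode(point{X: 3, Y: 4})
	fmt.Println(string(buf)) // "3,4"
	var p point
	codec.NewDecoderBytes(buf, &h).MustDecode(&p)
	fmt.Println(p) // {3 4}
}
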
+type Ext interface { + BytesExt + InterfaceExt +} + +// addExtWrapper is a wrapper implementation to support former AddExt exported method. +type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) + } + return bs +} + +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) + } +} + +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} + +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) +} + +type setExtWrapper struct { + b BytesExt + i InterfaceExt +} + +func (x *setExtWrapper) WriteExt(v interface{}) []byte { + if x.b == nil { + panic("BytesExt.WriteExt is not supported") + } + return x.b.WriteExt(v) +} + +func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) { + if x.b == nil { + panic("BytesExt.WriteExt is not supported") + + } + x.b.ReadExt(v, bs) +} + +func (x *setExtWrapper) ConvertExt(v interface{}) interface{} { + if x.i == nil { + panic("InterfaceExt.ConvertExt is not supported") + + } + return x.i.ConvertExt(v) +} + +func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { + if x.i == nil { + panic("InterfaceExxt.UpdateExt is not supported") + + } + x.i.UpdateExt(dest, v) +} + +// type errorString string +// func (x errorString) Error() string { return string(x) } + +type binaryEncodingType struct{} + +func (_ binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (_ textEncodingType) isBinary() bool { return false } + +// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. +type noBuiltInTypes struct{} + +func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false } +func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +type noStreamingCodec struct{} + +func (_ noStreamingCodec) CheckBreak() bool { return false } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. + w encWriter +} + +func (z bigenHelper) writeUint16(v uint16) { + bigen.PutUint16(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint32(v uint32) { + bigen.PutUint32(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint64(v uint64) { + bigen.PutUint64(z.x, v) + z.w.writeb(z.x) +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag uint64 + ext Ext +} + +type extHandle []extTypeTagFn + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. +// +// AddExt registes an encode and decode function for a reflect.Type. +// AddExt internally calls SetExt. +// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. +func (o *extHandle) AddExt( + rt reflect.Type, tag byte, + encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error, +) (err error) { + if encfn == nil || decfn == nil { + return o.SetExt(rt, uint64(tag), nil) + } + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) +} + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. 
+// +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call SetExt with nil Ext +func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.ext = tag, ext + return + } + } + + if *o == nil { + *o = make([]extTypeTagFn, 0, 4) + } + *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.tag == tag { + return v + } + } + return nil +} + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. + + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? +} + +// func (si *structFieldInfo) isZero() bool { +// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray +// } + +// rv returns the field of the struct. +// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) { + if si.i != -1 { + v = v.Field(int(si.i)) + return v + } + // replicate FieldByIndex + for _, x := range si.is { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.Field(x) + } + return v +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if si.i != -1 { + v = v.Field(int(si.i)) + v.Set(reflect.Zero(v.Type())) + // v.Set(reflect.New(v.Type()).Elem()) + // v.Set(reflect.New(v.Type())) + } else { + // replicate FieldByIndex + for _, x := range si.is { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return + } + v = v.Elem() + } + v = v.Field(x) + } + v.Set(reflect.Zero(v.Type())) + } +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + // if fname == "" { + // panic(noFieldNameToStructFieldInfoErr) + // } + si := structFieldInfo{ + encName: fname, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + if s == "omitempty" { + si.omitEmpty = true + } else if s == "toarray" { + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. 
+// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + + numMeth uint16 // number of methods + + // baseId gives pointer to the base reflect.Type, after deferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + bm bool // base type (T or *T) is a binaryMarshaler + bunm bool // base type (T or *T) is a binaryUnmarshaler + bmIndir int8 // number of indirections to get to binaryMarshaler type + bunmIndir int8 // number of indirections to get to binaryUnmarshaler type + + tm bool // base type (T or *T) is a textMarshaler + tunm bool // base type (T or *T) is a textUnmarshaler + tmIndir int8 // number of indirections to get to textMarshaler type + tunmIndir int8 // number of indirections to get to textUnmarshaler type + + jm bool // base type (T or *T) is a jsonMarshaler + junm bool // base type (T or *T) is a jsonUnmarshaler + jmIndir int8 // number of indirections to get to jsonMarshaler type + junmIndir int8 // number of indirections to get to jsonUnmarshaler type + + cs bool // base type (T or *T) is a Selfer + csIndir int8 // number of indirections to get to Selfer type + + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + // NOTE: name may be a stringView, so don't pass it to another function. + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 +} + +// TypeInfos caches typeInfo for each type on first inspection. +// +// It is configured with a set of tag keys, which are used to get +// configuration for the type. +type TypeInfos struct { + infos map[uintptr]*typeInfo + mu sync.RWMutex + tags []string +} + +// NewTypeInfos creates a TypeInfos given a set of struct tags keys. +// +// This allows users customize the struct tag keys which contain configuration +// of their types. +func NewTypeInfos(tags []string) *TypeInfos { + return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)} +} + +func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { + // check for tags: codec, json, in that order. + // this allows seamless support for many configured structs. 
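+ // Illustrative (not part of the original): with the default tag keys, a field
+ // declared as
+ //
+ //	Name string `codec:"name,omitempty"`
+ //
+ // resolves here to the tag value "name,omitempty", which parseStructFieldInfo
+ // later splits into the encoded name and its options.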
+ for _, x := range x.tags { + s = t.Get(x) + if s != "" { + return s + } + } + return +} + +func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + x.mu.RLock() + pti, ok = x.infos[rtid] + x.mu.RUnlock() + if ok { + return + } + + // do not hold lock while computing this. + // it may lead to duplication, but that's ok. + ti := typeInfo{rt: rt, rtid: rtid} + ti.numMeth = uint16(rt.NumMethod()) + + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.bm, ti.bmIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.bunm, ti.bunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { + ti.tm, ti.tmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { + ti.tunm, ti.tunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { + ti.jm, ti.jmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { + ti.junm, ti.junmIndir = true, indir + } + if ok, indir = implementsIntf(rt, selferTyp); ok { + ti.cs, ti.csIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var omitEmpty bool + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) + ti.toArray = siInfo.toArray + omitEmpty = siInfo.omitEmpty + } + pi := rgetPool.Get() + pv := pi.(*rgetPoolT) + pv.etypes[0] = ti.baseId + vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) + rgetPool.Put(pi) + } + // sfi = sfip + + x.mu.Lock() + if pti, ok = x.infos[rtid]; !ok { + pti = &ti + x.infos[rtid] = pti + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []int, pv *rgetT, +) { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. + // + // Note: we consciously use slices, not a map, to simulate a set. + // Typically, types have < 16 fields, + // and iteration using equals is faster than maps there + +LOOP: + for j, jlen := 0, rt.NumField(); j < jlen; j++ { + f := rt.Field(j) + fkind := f.Type.Kind() + // skip if a func type, or is unexported, or structTag value == "-" + switch fkind { + case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: + continue LOOP + } + + // if r1, _ := utf8.DecodeRuneInString(f.Name); + // r1 == utf8.RuneError || !unicode.IsUpper(r1) { + if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded + continue + } + stag := x.structTag(f.Tag) + if stag == "-" { + continue + } + var si *structFieldInfo + // if anonymous and no struct tag (or it's blank), + // and a struct (or pointer to struct), inline it. 
+ if f.Anonymous && fkind != reflect.Interface { + doInline := stag == "" + if !doInline { + si = parseStructFieldInfo("", stag) + doInline = si.encName == "" + // doInline = si.isZero() + } + if doInline { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + // if etypes contains this, don't call rget again (as fields are already seen here) + ftid := reflect.ValueOf(ft).Pointer() + // We cannot recurse forever, but we need to track other field depths. + // So - we break if we see a type twice (not the first time). + // This should be sufficient to handle an embedded type that refers to its + // owning type, which then refers to its embedded type. + processIt := true + numk := 0 + for _, k := range pv.etypes { + if k == ftid { + numk++ + if numk == rgetMaxRecursion { + processIt = false + break + } + } + } + if processIt { + pv.etypes = append(pv.etypes, ftid) + indexstack2 := make([]int, len(indexstack)+1) + copy(indexstack2, indexstack) + indexstack2[len(indexstack)] = j + // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + x.rget(ft, ftid, omitEmpty, indexstack2, pv) + } + continue + } + } + } + + // after the anonymous dance: if an unexported field, skip + if f.PkgPath != "" { // unexported + continue + } + + if f.Name == "" { + panic(noFieldNameToStructFieldInfoErr) + } + + pv.fNames = append(pv.fNames, f.Name) + + if si == nil { + si = parseStructFieldInfo(f.Name, stag) + } else if si.encName == "" { + si.encName = f.Name + } + si.fieldName = f.Name + + pv.encNames = append(pv.encNames, si.encName) + + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = make([]int, len(indexstack)+1) + copy(si.is, indexstack) + si.is[len(indexstack)] = j + // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if omitEmpty { + si.omitEmpty = true + } + pv.sfis = append(pv.sfis, si) + } +} + +// resolves the struct field info got from a call to rget. +// Returns a trimmed, unsorted and sorted []*structFieldInfo. +func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) { + var n int + for i, v := range x { + xn := v.encName //TODO: fieldName or encName? use encName for now. 
+ var found bool + for j, k := range pv { + if k.name == xn { + // one of them must be reset to nil, and the index updated appropriately to the other one + if len(v.is) == len(x[k.index].is) { + } else if len(v.is) < len(x[k.index].is) { + pv[j].index = i + if x[k.index] != nil { + x[k.index] = nil + n++ + } + } else { + if x[i] != nil { + x[i] = nil + n++ + } + } + found = true + break + } + } + if !found { + pv = append(pv, sfiIdx{xn, i}) + } + } + + // remove all the nils + y = make([]*structFieldInfo, len(x)-n) + n = 0 + for _, v := range x { + if v == nil { + continue + } + y[n] = v + n++ + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + return +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } +} + +// func doPanic(tag string, format string, params ...interface{}) { +// params2 := make([]interface{}, len(params)+1) +// params2[0] = tag +// copy(params2[1:], params) +// panic(fmt.Errorf("%s: "+format, params2...)) +// } + +func isImmutableKind(k reflect.Kind) (v bool) { + return false || + k == reflect.Int || + k == reflect.Int8 || + k == reflect.Int16 || + k == reflect.Int32 || + k == reflect.Int64 || + k == reflect.Uint || + k == reflect.Uint8 || + k == reflect.Uint16 || + k == reflect.Uint32 || + k == reflect.Uint64 || + k == reflect.Uintptr || + k == reflect.Float32 || + k == reflect.Float64 || + k == reflect.Bool || + k == reflect.String +} + +// these functions must be inlinable, and not call anybody +type checkOverflow struct{} + +func (_ checkOverflow) Float32(f float64) (overflow bool) { + if f < 0 { + f = -f + } + return math.MaxFloat32 < f && f <= math.MaxFloat64 +} + +func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { + //e.g. 
-127 to 128 for int8 + pos := (v >> 63) == 0 + ui2 := v & 0x7fffffffffffffff + if pos { + if ui2 > math.MaxInt64 { + overflow = true + return + } + } else { + if ui2 > math.MaxInt64-1 { + overflow = true + return + } + } + i = int64(v) + return +} + +// ------------------ SORT ----------------- + +func isNaN(f float64) bool { return f != f } + +// ----------------------- + +type intSlice []int64 +type uintSlice []uint64 +type floatSlice []float64 +type boolSlice []bool +type stringSlice []string +type bytesSlice [][]byte + +func (p intSlice) Len() int { return len(p) } +func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintSlice) Len() int { return len(p) } +func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatSlice) Len() int { return len(p) } +func (p floatSlice) Less(i, j int) bool { + return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) +} +func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringSlice) Len() int { return len(p) } +func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesSlice) Len() int { return len(p) } +func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } +func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolSlice) Len() int { return len(p) } +func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } +func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// --------------------- + +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) +} +func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } +func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) 
bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }() + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. + if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }() + x := *s + if len(x) == 0 { + return + } + if len(x) == 1 { + if x[0] == v { + x[0] = 0 + } + return + } + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return + } + } + return +} diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go new file mode 100644 index 0000000..5d0727f --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_internal.go @@ -0,0 +1,242 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +func panicValToErr(panicVal interface{}, err *error) { + if panicVal == nil { + return + } + // case nil + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + return +} + +func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return hIsEmptyValue(v.Elem(), deref, checkStruct) + } else { + return v.IsNil() + } + case reflect.Struct: + if !checkStruct { + return false + } + // return true if all fields are empty. else return false. + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. 
+ // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !hIsEmptyValue(v.Field(i), deref, checkStruct) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + return hIsEmptyValue(v, deref, checkStruct) +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} + +// validate that this function is correct ... +// culled from OGRE (Object-Oriented Graphics Rendering Engine) +// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) +func halfFloatToFloatBits(yy uint16) (d uint32) { + y := uint32(yy) + s := (y >> 15) & 0x01 + e := (y >> 10) & 0x1f + m := y & 0x03ff + + if e == 0 { + if m == 0 { // plu or minus 0 + return s << 31 + } else { // Denormalized number -- renormalize it + for (m & 0x00000400) == 0 { + m <<= 1 + e -= 1 + } + e += 1 + const zz uint32 = 0x0400 + m &= ^zz + } + } else if e == 31 { + if m == 0 { // Inf + return (s << 31) | 0x7f800000 + } else { // NaN + return (s << 31) | 0x7f800000 | (m << 13) + } + } + e = e + (127 - 15) + m = m << 13 + return (s << 31) | (e << 23) | m +} + +// GrowCap will return a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num int) (newCap int) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account + + // maintain 3 thresholds: + // t1: if cap <= t1, newcap = 2x + // t2: if cap <= t2, newcap = 1.75x + // t3: if cap <= t3, newcap = 1.5x + // else newcap = 1.25x + // + // t1, t2, t3 >= 1024 always. + // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increase by: + // 100% up to 4K + // 75% up to 8K + // 50% up to 16K + // 25% beyond that + + // unit can be 0 e.g. 
for struct{}{}; handle that appropriately + var t1, t2, t3 int // thresholds + if unit <= 1 { + t1, t2, t3 = 4*1024, 8*1024, 16*1024 + } else if unit < 16 { + t3 = 16 / unit * 1024 + t1 = t3 * 1 / 4 + t2 = t3 * 2 / 4 + } else { + t1, t2, t3 = 1024, 1024, 1024 + } + + var x int // temporary variable + + // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively + if oldCap <= t1 { // [0,t1] + x = 8 + } else if oldCap > t3 { // (t3,infinity] + x = 5 + } else if oldCap <= t2 { // (t1,t2] + x = 7 + } else { // (t2,t3] + x = 6 + } + newCap = x * oldCap / 4 + + if num > 0 { + newCap += num + } + + // ensure newCap is a multiple of 64 (if it is > 64) or 16. + if newCap > 64 { + if x = newCap % 64; x != 0 { + x = newCap / 64 + newCap = 64 * (x + 1) + } + } else { + if x = newCap % 16; x != 0 { + x = newCap / 16 + newCap = 16 * (x + 1) + } + } + return +} + +func expandSliceValue(s reflect.Value, num int) reflect.Value { + if num <= 0 { + return s + } + l0 := s.Len() + l1 := l0 + num // new slice length + if l1 < l0 { + panic("ExpandSlice: slice overflow") + } + c0 := s.Cap() + if l1 <= c0 { + return s.Slice(0, l1) + } + st := s.Type() + c1 := growCap(c0, int(st.Elem().Size()), num) + s2 := reflect.MakeSlice(st, l1, c1) + // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1) + reflect.Copy(s2, s) + return s2 +} diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go new file mode 100644 index 0000000..8b06a00 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -0,0 +1,20 @@ +// +build !unsafe + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func bytesView(v string) []byte { + return []byte(v) +} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go new file mode 100644 index 0000000..0f596c7 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -0,0 +1,49 @@ +// +build unsafe + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "unsafe" +) + +// This file has unsafe variants of some helper methods. + +type unsafeString struct { + Data uintptr + Len int +} + +type unsafeSlice struct { + Data uintptr + Len int + Cap int +} + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +func stringView(v []byte) string { + if len(v) == 0 { + return "" + } + + bx := (*unsafeSlice)(unsafe.Pointer(&v)) + sx := unsafeString{bx.Data, bx.Len} + return *(*string)(unsafe.Pointer(&sx)) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. 
+// In regular safe mode, it is an allocation and copy. +func bytesView(v string) []byte { + if len(v) == 0 { + return zeroByteSlice + } + + sx := (*unsafeString)(unsafe.Pointer(&v)) + bx := unsafeSlice{sx.Data, sx.Len, sx.Len} + return *(*[]byte)(unsafe.Pointer(&bx)) +} diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go new file mode 100644 index 0000000..463cae3 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/json.go @@ -0,0 +1,1247 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// By default, this json support uses base64 encoding for bytes, because you cannot +// store and read any arbitrary string in json (only unicode). +// However, the user can configre how to encode/decode bytes. +// +// This library specifically supports UTF-8 for encoding and decoding only. +// +// Note that the library will happily encode/decode things which are not valid +// json e.g. a map[int64]string. We do it for consistency. With valid json, +// we will encode and decode appropriately. +// Users can specify their map type if necessary to force it. +// +// Note: +// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. +// We implement it here. +// - Also, strconv.ParseXXX for floats and integers +// - only works on strings resulting in unnecessary allocation and []byte-string conversion. +// - it does a lot of redundant checks, because json numbers are simpler that what it supports. +// - We parse numbers (floats and integers) directly here. +// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. +// In that case, we delegate to strconv.ParseFloat. +// +// Note: +// - encode does not beautify. There is no whitespace when encoding. +// - rpc calls which take single integer arguments or write single numeric arguments will need care. + +// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver +// MUST not call one-another. + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var ( + jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} + + jsonFloat64Pow10 = [...]float64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22, + } + + jsonUint64Pow10 = [...]uint64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + } + + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces string +) + +const ( + // jsonUnreadAfterDecNum controls whether we unread after decoding a number. + // + // instead of unreading, just update d.tok (iff it's not a whitespace char) + // However, doing this means that we may HOLD onto some data which belongs to another stream. + // Thus, it is safest to unread the data when done. + // keep behind a constant flag for now. + jsonUnreadAfterDecNum = true + + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. 
+ jsonValidateSymbols = true + + // if jsonTruncateMantissa, truncate mantissa if trailing 0's. + // This is important because it could allow some floats to be decoded without + // deferring to strconv.ParseFloat. + jsonTruncateMantissa = true + + // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow + jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) + + // if mantissa >= jsonNumUintMaxVal, this is an overflow + jsonNumUintMaxVal = 1< 1<<53 || v < -(1<<53)) { + e.w.writen1('"') + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) + e.w.writen1('"') + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriver) EncodeUint(v uint64) { + if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 { + e.w.writen1('"') + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) + e.w.writen1('"') + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriver) EncodeMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + // e.w.writestr(strconv.Quote(v)) + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeSymbol(v string) { + // e.EncodeString(c_UTF8, v) + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if c == c_RAW && e.se.i != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + if c == c_RAW { + slen := base64.StdEncoding.EncodedLen(len(v)) + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + base64.StdEncoding.Encode(e.bs, v) + e.w.writen1('"') + e.w.writeb(e.bs) + e.w.writen1('"') + } else { + // e.EncodeString(c, string(v)) + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + w.writen1('"') + start := 0 + for i := 0; i < len(s); { + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. 
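+ // e.g. (illustrative) "<b>" is written as \u003cb\u003e unless the handle's
+ // HTMLCharsAsIs option is set, in which case < > & pass through unescaped.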
+ if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + case '<', '>', '&': + if e.h.HTMLCharsAsIs { + w.writen1(b) + } else { + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + default: + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. + if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +//-------------------------------- + +type jsonNum struct { + // bytes []byte // may have [+-.eE0-9] + mantissa uint64 // where mantissa ends, and maybe dot begins. + exponent int16 // exponent value. + manOverflow bool + neg bool // started with -. No initial sign in the bytes above. + dot bool // has dot + explicitExponent bool // explicit exponent +} + +func (x *jsonNum) reset() { + x.manOverflow = false + x.neg = false + x.dot = false + x.explicitExponent = false + x.mantissa = 0 + x.exponent = 0 +} + +// uintExp is called only if exponent > 0. +func (x *jsonNum) uintExp() (n uint64, overflow bool) { + n = x.mantissa + e := x.exponent + if e >= int16(len(jsonUint64Pow10)) { + overflow = true + return + } + n *= jsonUint64Pow10[e] + if n < x.mantissa || n > jsonNumUintMaxVal { + overflow = true + return + } + return + // for i := int16(0); i < e; i++ { + // if n >= jsonNumUintCutoff { + // overflow = true + // return + // } + // n *= 10 + // } + // return +} + +// these constants are only used withn floatVal. +// They are brought out, so that floatVal can be inlined. +const ( + jsonUint64MantissaBits = 52 + jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1 +) + +func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) { + // We do not want to lose precision. + // Consequently, we will delegate to strconv.ParseFloat if any of the following happen: + // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits) + // We expect up to 99.... (19 digits) + // - The mantissa cannot fit into a 52 bits of uint64 + // - The exponent is beyond our scope ie beyong 22. + parseUsingStrConv = x.manOverflow || + x.exponent > jsonMaxExponent || + (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) || + x.mantissa>>jsonUint64MantissaBits != 0 + + if parseUsingStrConv { + return + } + + // all good. so handle parse here. + f = float64(x.mantissa) + // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f) + if x.neg { + f = -f + } + if x.exponent > 0 { + f *= jsonFloat64Pow10[x.exponent] + } else if x.exponent < 0 { + f /= jsonFloat64Pow10[-x.exponent] + } + return +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r decReader + + c containerState + // tok is used to store the token read right after skipWhiteSpace. 
+ tok uint8 + + bstr [8]byte // scratch used for string \UXXX parsing + b [64]byte // scratch, used for parsing strings or numbers + b2 [64]byte // scratch, used only for decodeBytes (after base64) + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + + se setExtWrapper + + n jsonNum +} + +func jsonIsWS(b byte) bool { + return b == ' ' || b == '\t' || b == '\r' || b == '\n' +} + +// // This will skip whitespace characters and return the next byte to read. +// // The next byte determines what the value will be one of. +// func (d *jsonDecDriver) skipWhitespace() { +// // fast-path: do not enter loop. Just check first (in case no whitespace). +// b := d.r.readn1() +// if jsonIsWS(b) { +// r := d.r +// for b = r.readn1(); jsonIsWS(b); b = r.readn1() { +// } +// } +// d.tok = b +// } + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) sendContainerState(c containerState) { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + var xc uint8 // char expected + if c == containerMapKey { + if d.c != containerMapStart { + xc = ',' + } + } else if c == containerMapValue { + xc = ':' + } else if c == containerMapEnd { + xc = '}' + } else if c == containerArrayElem { + if d.c != containerArrayStart { + xc = ',' + } + } else if c == containerArrayEnd { + xc = ']' + } + if xc != 0 { + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = c +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == '}' || d.tok == ']' { + // d.tok = 0 // only checking, not consuming + return true + } + return false +} + +func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) { + bs := d.r.readx(int(toIdx - fromIdx)) + d.tok = 0 + if jsonValidateSymbols { + if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) { + d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs) + return + } + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == 'n' { + d.readStrIdx(10, 13) // ull + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() bool { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok == 'f' { + d.readStrIdx(5, 9) // alse + return false + } + if d.tok == 't' { + d.readStrIdx(1, 4) // rue + return true + } + d.d.errorf("json: decode bool: got first char %c", d.tok) + return false // "unreachable" +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok != '{' { + d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if d.tok != '[' { + d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + var b byte + r := 
d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // return false // "unreachable" +} + +func (d *jsonDecDriver) decNum(storeBytes bool) { + // If it is has a . or an e|E, decode as a float; else decode as an int. + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + b := d.tok + var str bool + if b == '"' { + str = true + b = d.r.readn1() + } + if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) { + d.d.errorf("json: decNum: got first char '%c'", b) + return + } + d.tok = 0 + + const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) + const jsonNumUintMaxVal = 1<= jsonNumUintCutoff { + n.manOverflow = true + break + } + v := uint64(b - '0') + n.mantissa *= 10 + if v != 0 { + n1 := n.mantissa + v + if n1 < n.mantissa || n1 > jsonNumUintMaxVal { + n.manOverflow = true // n+v overflows + break + } + n.mantissa = n1 + } + case 6: + state = 7 + fallthrough + case 7: + if !(b == '0' && e == 0) { + e = e*10 + int16(b-'0') + } + default: + break LOOP + } + case '"': + if str { + if storeBytes { + d.bs = append(d.bs, '"') + } + b, eof = r.readn1eof() + } + break LOOP + default: + break LOOP + } + if storeBytes { + d.bs = append(d.bs, b) + } + b, eof = r.readn1eof() + } + + if jsonTruncateMantissa && n.mantissa != 0 { + for n.mantissa%10 == 0 { + n.mantissa /= 10 + n.exponent++ + } + } + + if e != 0 { + if eNeg { + n.exponent -= e + } else { + n.exponent += e + } + } + + // d.n = n + + if !eof { + if jsonUnreadAfterDecNum { + r.unreadn1() + } else { + if !jsonIsWS(b) { + d.tok = b + } + } + } + // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n", + // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex) + return +} + +func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { + d.decNum(false) + n := &d.n + if n.manOverflow { + d.d.errorf("json: overflow integer after: %v", n.mantissa) + return + } + var u uint64 + if n.exponent == 0 { + u = n.mantissa + } else if n.exponent < 0 { + d.d.errorf("json: fractional integer") + return + } else if n.exponent > 0 { + var overflow bool + if u, overflow = n.uintExp(); overflow { + d.d.errorf("json: overflow integer") + return + } + } + i = int64(u) + if n.neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) + return + } + // fmt.Printf("DecodeInt: %v\n", i) + return +} + +// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number +func (d *jsonDecDriver) floatVal() (f float64) { + f, useStrConv := d.n.floatVal() + if useStrConv { + var err error + if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil { + panic(fmt.Errorf("parse float: %s, %v", d.bs, err)) + } + if d.n.neg { + f = -f + } + } + return +} + +func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { + d.decNum(false) + n := &d.n + if n.neg { + d.d.errorf("json: unsigned integer cannot be negative") + return + } + if n.manOverflow { + d.d.errorf("json: overflow integer after: %v", n.mantissa) + return + } + if n.exponent == 0 { + u = n.mantissa + } else if n.exponent < 0 { + d.d.errorf("json: fractional integer") + return + } else if n.exponent > 0 { + var overflow bool + 
if u, overflow = n.uintExp(); overflow { + d.d.errorf("json: overflow integer") + return + } + } + if chkOvf.Uint(u, bitsize) { + d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) + return + } + // fmt.Printf("DecodeUint: %v\n", u) + return +} + +func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + d.decNum(true) + f = d.floatVal() + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("json: overflow float32: %v, %s", f, d.bs) + return + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if !isstring && d.se.i != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + d.appendStringAsBytes() + // if isstring, then just return the bytes, even if it is using the scratch buffer. + // the bytes will be converted to a string as needed. + if isstring { + return d.bs + } + // if appendStringAsBytes returned a zero-len slice, then treat as nil. + // This should only happen for null, and "". + if len(d.bs) == 0 { + return nil + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + + // handle null as a string + if d.tok == 'n' { + d.readStrIdx(10, 13) // ull + d.bs = d.bs[:0] + return + } + + if d.tok != '"' { + d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) + } + d.tok = 0 + + v := d.bs[:0] + var c uint8 + r := d.r + for { + c = r.readn1() + if c == '"' { + break + } else if c == '\\' { + c = r.readn1() + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + rr := d.jsonU4(false) + // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr)) + if utf16.IsSurrogate(rr) { + rr = utf16.DecodeRune(rr, d.jsonU4(true)) + } + w2 := utf8.EncodeRune(d.bstr[:], rr) + v = append(v, d.bstr[:w2]...) + default: + d.d.errorf("json: unsupported escaped value: %c", c) + } + } else { + v = append(v, c) + } + } + d.bs = v +} + +func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune { + r := d.r + if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') { + d.d.errorf(`json: unquoteStr: invalid unicode sequence. 
Expecting \u`) + return 0 + } + // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64) + var u uint32 + for i := 0; i < 4; i++ { + v := r.readn1() + if '0' <= v && v <= '9' { + v = v - '0' + } else if 'a' <= v && v <= 'z' { + v = v - 'a' + 10 + } else if 'A' <= v && v <= 'Z' { + v = v - 'A' + 10 + } else { + d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v) + return 0 + } + u = u*16 + uint32(v) + } + return rune(u) +} + +func (d *jsonDecDriver) DecodeNaked() { + z := &d.d.n + // var decodeFurther bool + + if d.tok == 0 { + var b byte + r := d.r + for b = r.readn1(); jsonIsWS(b); b = r.readn1() { + } + d.tok = b + } + switch d.tok { + case 'n': + d.readStrIdx(10, 13) // ull + z.v = valueTypeNil + case 'f': + d.readStrIdx(5, 9) // alse + z.v = valueTypeBool + z.b = false + case 't': + d.readStrIdx(1, 4) // rue + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap + // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart + // decodeFurther = true + case '[': + z.v = valueTypeArray + // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart + // decodeFurther = true + case '"': + z.v = valueTypeString + z.s = d.DecodeString() + default: // number + d.decNum(true) + n := &d.n + // if the string had a any of [.eE], then decode as float. + switch { + case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow: + z.v = valueTypeFloat + z.f = d.floatVal() + case n.exponent == 0: + u := n.mantissa + switch { + case n.neg: + z.v = valueTypeInt + z.i = -int64(u) + case d.h.SignedInteger: + z.v = valueTypeInt + z.i = int64(u) + default: + z.v = valueTypeUint + z.u = u + } + default: + u, overflow := n.uintExp() + switch { + case overflow: + z.v = valueTypeFloat + z.f = d.floatVal() + case n.neg: + z.v = valueTypeInt + z.i = -int64(u) + case d.h.SignedInteger: + z.v = valueTypeInt + z.i = int64(u) + default: + z.v = valueTypeUint + z.u = u + } + } + // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v) + } + // if decodeFurther { + // d.s.sc.retryRead() + // } + return +} + +//---------------------- + +// JsonHandle is a handle for JSON encoding format. +// +// Json is comprehensively supported: +// - decodes numbers into interface{} as int, uint or float64 +// - configurable way to encode/decode []byte . +// by default, encodes and decodes []byte using base64 Std Encoding +// - UTF-8 support for encoding and decoding +// +// It has better performance than the json library in the standard library, +// by leveraging the performance improvements of the codec library and +// minimizing allocations. +// +// In addition, it doesn't read more bytes than necessary during a decode, which allows +// reading multiple values from a stream containing json and non-json content. +// For example, a user can read a json value, then a cbor value, then a msgpack value, +// all from the same stream in sequence. +type JsonHandle struct { + textEncodingType + BasicHandle + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. 
+ // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interpretes the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. + // - else encode all integers as a json number (default) + IntegerAsString uint8 + + // HTMLCharsAsIs controls how to encode some special characters to html: < > & + // + // By default, we encode them as \uXXX + // to prevent security holes when served from some browsers. + HTMLCharsAsIs bool +} + +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { + hd := jsonEncDriver{e: e, h: h} + hd.bs = hd.b[:0] + + hd.reset() + + return &hd +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w + e.se.i = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.d, e.dt, e.dl, e.ds = false, false, 0, "" + e.c = 0 + if e.h.Indent > 0 { + e.d = true + e.ds = jsonSpaces[:e.h.Indent] + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.ds = jsonTabs[:-(e.h.Indent)] + } +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r + d.se.i = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + d.n.reset() +} + +var jsonEncodeTerminate = []byte{' '} + +func (h *JsonHandle) rpcEncodeTerminate() []byte { + return jsonEncodeTerminate +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go new file mode 100644 index 0000000..4907c64 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -0,0 +1,852 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. 
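+
+For example (illustrative, not in the original comment): int64(5) and uint64(5)
+both encode to the single byte 0x05 (a positive fixnum), while int64(-3) encodes
+to the single byte 0xfd (a negative fixnum).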
+ +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" + "reflect" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + noBuiltInTypes + encNoSeparator + e *Encoder + w encWriter + h *MsgpackHandle + x [8]byte +} + +func (e *msgpackEncDriver) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) EncodeInt(i int64) { + if i >= 0 { + e.EncodeUint(uint64(i)) + } else if i >= -32 { + e.w.writen1(byte(i)) + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + e.w.writen1(byte(i)) + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpUint64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + 
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytes(c_RAW, bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) EncodeArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) EncodeMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) + } else { + e.writeContainerLen(msgpackContainerStr, len(s)) + } + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerStr, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + if ct.hasFixMin && l < ct.fixCutoff { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { + e.w.writen1(ct.b32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + d *Decoder + r decReader // *Decoder decReader decReaderT + h *MsgpackHandle + b [scratchByteArrayLen]byte + bd byte + bdRead bool + br bool // bytes reader + noBuiltInTypes + noStreamingCodec + decNoSeparator +} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. 
+func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := &d.d.n + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() + } else { + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(clen) + default: + d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(bigen.Uint16(d.r.readx(2)))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readx(8))) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readx(8))) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> 
(64 - bitsize); i != trunc { + d.d.errorf("Overflow int value: %v", i) + return + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + ui = bigen.Uint64(d.r.readx(8)) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + return + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + d.d.errorf("Overflow uint value: %v", ui) + return + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt(0)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { + // b = false + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + var clen int + // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin + if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else { + clen = d.readContainerLen(msgpackContainerStr) + } + // println("DecodeBytes: clen: ", clen) + d.bdRead = false + // bytes may be nil, so handle it. if nil, clen=-1. 
+ if clen < 0 { + return nil + } + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *msgpackDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + bd := d.bd + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || + (!d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { + return valueTypeBytes + } else if d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { + return valueTypeString + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + v = true + } + return +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == mpNil { + clen = -1 // to represent nil + } else if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) ReadMapStart() int { + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) ReadArrayStart() int { + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readx(2))) + case mpExt32: + clen = int(bigen.Uint32(d.r.readx(4))) + default: + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, false, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) 
{ + xbs = d.DecodeBytes(nil, true, true) + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool + binaryEncodingType +} + +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.isClosed() { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. 
+ + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/noop.go b/vendor/github.com/ugorji/go/codec/noop.go new file mode 100644 index 0000000..cfee3d0 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/noop.go @@ -0,0 +1,213 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math/rand" + "time" +) + +// NoopHandle returns a no-op handle. It basically does nothing. +// It is only useful for benchmarking, as it gives an idea of the +// overhead from the codec framework. +// +// LIBRARY USERS: *** DO NOT USE *** +func NoopHandle(slen int) *noopHandle { + h := noopHandle{} + h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) + h.B = make([][]byte, slen) + h.S = make([]string, slen) + for i := 0; i < len(h.S); i++ { + b := make([]byte, i+1) + for j := 0; j < len(b); j++ { + b[j] = 'a' + byte(i) + } + h.B[i] = b + h.S[i] = string(b) + } + return &h +} + +// noopHandle does nothing. +// It is used to simulate the overhead of the codec framework. +type noopHandle struct { + BasicHandle + binaryEncodingType + noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. +} + +type noopDrv struct { + d *Decoder + e *Encoder + i int + S []string + B [][]byte + mks []bool // stack. if map (true), else if array (false) + mk bool // top of stack. what container are we on? map or array? + ct valueType // last response for IsContainerType. 
+ cb int // counter for ContainerType + rand *rand.Rand +} + +func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } +func (h *noopDrv) m(v int) int { h.i++; return h.i % v } + +func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h } +func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h } + +func (h *noopDrv) reset() {} +func (h *noopDrv) uncacheRead() {} + +// --- encDriver + +// stack functions (for map and array) +func (h *noopDrv) start(b bool) { + // println("start", len(h.mks)+1) + h.mks = append(h.mks, b) + h.mk = b +} +func (h *noopDrv) end() { + // println("end: ", len(h.mks)-1) + h.mks = h.mks[:len(h.mks)-1] + if len(h.mks) > 0 { + h.mk = h.mks[len(h.mks)-1] + } else { + h.mk = false + } +} + +func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} +func (h *noopDrv) EncodeNil() {} +func (h *noopDrv) EncodeInt(i int64) {} +func (h *noopDrv) EncodeUint(i uint64) {} +func (h *noopDrv) EncodeBool(b bool) {} +func (h *noopDrv) EncodeFloat32(f float32) {} +func (h *noopDrv) EncodeFloat64(f float64) {} +func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} +func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } +func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } +func (h *noopDrv) EncodeEnd() { h.end() } + +func (h *noopDrv) EncodeString(c charEncoding, v string) {} +func (h *noopDrv) EncodeSymbol(v string) {} +func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} + +func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} + +// ---- decDriver +func (h *noopDrv) initReadNext() {} +func (h *noopDrv) CheckBreak() bool { return false } +func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } +func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} +func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } +func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } +func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } +func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } +func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } + +// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) } + +func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] } + +func (h *noopDrv) ReadEnd() { h.end() } + +// toggle map/slice +func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } +func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } + +func (h *noopDrv) ContainerType() (vt valueType) { + // return h.m(2) == 0 + // handle kStruct, which will bomb is it calls this and doesn't get back a map or array. + // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2 + // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs) + // however, every 10th time it is called, we just return something else. 
+ var vals = [...]valueType{valueTypeArray, valueTypeMap} + // ------------ TAKE ------------ + // if h.cb%2 == 0 { + // if h.ct == valueTypeMap || h.ct == valueTypeArray { + // } else { + // h.ct = vals[h.m(2)] + // } + // } else if h.cb%5 == 0 { + // h.ct = valueType(h.m(8)) + // } else { + // h.ct = vals[h.m(2)] + // } + // ------------ TAKE ------------ + // if h.cb%16 == 0 { + // h.ct = valueType(h.cb % 8) + // } else { + // h.ct = vals[h.cb%2] + // } + h.ct = vals[h.cb%2] + h.cb++ + return h.ct + + // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes { + // return h.ct + // } + // return valueTypeUnset + // TODO: may need to tweak this so it works. + // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { + // h.cb = !h.cb + // h.ct = vt + // return h.cb + // } + // // go in a loop and check it. + // h.ct = vt + // h.cb = h.m(7) == 0 + // return h.cb +} +func (h *noopDrv) TryDecodeAsNil() bool { + if h.mk { + return false + } else { + return h.m(8) == 0 + } +} +func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { + return 0 +} + +func (h *noopDrv) DecodeNaked() { + // use h.r (random) not h.m() because h.m() could cause the same value to be given. + var sk int + if h.mk { + // if mapkey, do not support values of nil OR bytes, array, map or rawext + sk = h.r(7) + 1 + } else { + sk = h.r(12) + } + n := &h.d.n + switch sk { + case 0: + n.v = valueTypeNil + case 1: + n.v, n.b = valueTypeBool, false + case 2: + n.v, n.b = valueTypeBool, true + case 3: + n.v, n.i = valueTypeInt, h.DecodeInt(64) + case 4: + n.v, n.u = valueTypeUint, h.DecodeUint(64) + case 5: + n.v, n.f = valueTypeFloat, h.DecodeFloat(true) + case 6: + n.v, n.f = valueTypeFloat, h.DecodeFloat(false) + case 7: + n.v, n.s = valueTypeString, h.DecodeString() + case 8: + n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))] + case 9: + n.v = valueTypeArray + case 10: + n.v = valueTypeMap + default: + n.v = valueTypeExt + n.u = h.DecodeUint(64) + n.l = h.B[h.m(len(h.B))] + } + h.ct = n.v + return +} diff --git a/vendor/github.com/ugorji/go/codec/prebuild.go b/vendor/github.com/ugorji/go/codec/prebuild.go new file mode 100644 index 0000000..2353263 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/prebuild.go @@ -0,0 +1,3 @@ +package codec + +//go:generate bash prebuild.sh diff --git a/vendor/github.com/ugorji/go/codec/prebuild.sh b/vendor/github.com/ugorji/go/codec/prebuild.sh new file mode 100644 index 0000000..909f4bb --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/prebuild.sh @@ -0,0 +1,199 @@ +#!/bin/bash + +# _needgen is a helper function to tell if we need to generate files for msgp, codecgen. +_needgen() { + local a="$1" + zneedgen=0 + if [[ ! -e "$a" ]] + then + zneedgen=1 + echo 1 + return 0 + fi + for i in `ls -1 *.go.tmpl gen.go values_test.go` + do + if [[ "$a" -ot "$i" ]] + then + zneedgen=1 + echo 1 + return 0 + fi + done + echo 0 +} + +# _build generates fast-path.go and gen-helper.go. +# +# It is needed because there is some dependency between the generated code +# and the other classes. Consequently, we have to totally remove the +# generated files and put stubs in place, before calling "go run" again +# to recreate them. +_build() { + if ! 
[[ "${zforce}" == "1" || + "1" == $( _needgen "fast-path.generated.go" ) || + "1" == $( _needgen "gen-helper.generated.go" ) || + "1" == $( _needgen "gen.generated.go" ) || + 1 == 0 ]] + then + return 0 + fi + + # echo "Running prebuild" + if [ "${zbak}" == "1" ] + then + # echo "Backing up old generated files" + _zts=`date '+%m%d%Y_%H%M%S'` + _gg=".generated.go" + [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak + [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak + # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak + # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak + else + rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \ + *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go + fi + + cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl + + cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl + + cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < math.MaxInt64 { + // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) + // return + // } + return +} + +func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("simple: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == simpleVdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + return + } + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). 
+func (d *simpleDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdTrue { + b = true + } else if d.bd == simpleVdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) ReadMapStart() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) ReadArrayStart() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(bigen.Uint16(d.r.readx(2))) + case 3: + ui := uint64(bigen.Uint32(d.r.readx(4))) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return 0 + } + return int(ui) + case 4: + ui := bigen.Uint64(d.r.readx(8)) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("simple: overflow integer: %v", ui) + return 0 + } + return int(ui) + } + d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true, true)) +} + +func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs = d.DecodeBytes(nil, false, true) + default: + d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := &d.d.n + var decodeFurther bool + + switch d.bd { + case simpleVdNil: + n.v = valueTypeNil + case simpleVdFalse: + n.v = valueTypeBool + n.b = false + case simpleVdTrue: + n.v = valueTypeBool + n.b = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case simpleVdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case simpleVdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.DecodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false, false) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Lenght of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. 
+type SimpleHandle struct { + BasicHandle + binaryEncodingType +} + +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { + return &simpleEncDriver{e: e, w: e.w, h: h} +} + +func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { + return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *simpleEncDriver) reset() { + e.w = e.e.w +} + +func (d *simpleDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*simpleDecDriver)(nil) +var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json new file mode 100644 index 0000000..9028586 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json @@ -0,0 +1,639 @@ +[ + { + "cbor": "AA==", + "hex": "00", + "roundtrip": true, + "decoded": 0 + }, + { + "cbor": "AQ==", + "hex": "01", + "roundtrip": true, + "decoded": 1 + }, + { + "cbor": "Cg==", + "hex": "0a", + "roundtrip": true, + "decoded": 10 + }, + { + "cbor": "Fw==", + "hex": "17", + "roundtrip": true, + "decoded": 23 + }, + { + "cbor": "GBg=", + "hex": "1818", + "roundtrip": true, + "decoded": 24 + }, + { + "cbor": "GBk=", + "hex": "1819", + "roundtrip": true, + "decoded": 25 + }, + { + "cbor": "GGQ=", + "hex": "1864", + "roundtrip": true, + "decoded": 100 + }, + { + "cbor": "GQPo", + "hex": "1903e8", + "roundtrip": true, + "decoded": 1000 + }, + { + "cbor": "GgAPQkA=", + "hex": "1a000f4240", + "roundtrip": true, + "decoded": 1000000 + }, + { + "cbor": "GwAAAOjUpRAA", + "hex": "1b000000e8d4a51000", + "roundtrip": true, + "decoded": 1000000000000 + }, + { + "cbor": "G///////////", + "hex": "1bffffffffffffffff", + "roundtrip": true, + "decoded": 18446744073709551615 + }, + { + "cbor": "wkkBAAAAAAAAAAA=", + "hex": "c249010000000000000000", + "roundtrip": true, + "decoded": 18446744073709551616 + }, + { + "cbor": "O///////////", + "hex": "3bffffffffffffffff", + "roundtrip": true, + "decoded": -18446744073709551616, + "skip": true + }, + { + "cbor": "w0kBAAAAAAAAAAA=", + "hex": "c349010000000000000000", + "roundtrip": true, + "decoded": -18446744073709551617 + }, + { + "cbor": "IA==", + "hex": "20", + "roundtrip": true, + "decoded": -1 + }, + { + "cbor": "KQ==", + "hex": "29", + "roundtrip": true, + "decoded": -10 + }, + { + "cbor": "OGM=", + "hex": "3863", + "roundtrip": true, + "decoded": -100 + }, + { + "cbor": "OQPn", + "hex": "3903e7", + "roundtrip": true, + "decoded": -1000 + }, + { + "cbor": "+QAA", + "hex": "f90000", + "roundtrip": true, + "decoded": 0.0 + }, + { + "cbor": "+YAA", + "hex": "f98000", + "roundtrip": true, + "decoded": -0.0 + }, + { + "cbor": "+TwA", + "hex": "f93c00", + "roundtrip": true, + "decoded": 1.0 + }, + { + "cbor": "+z/xmZmZmZma", + "hex": "fb3ff199999999999a", + "roundtrip": true, + "decoded": 1.1 + }, + { + "cbor": "+T4A", + "hex": "f93e00", + "roundtrip": true, + "decoded": 1.5 + }, + { + "cbor": "+Xv/", + "hex": "f97bff", + "roundtrip": true, + "decoded": 65504.0 + }, + { + "cbor": "+kfDUAA=", + "hex": "fa47c35000", + "roundtrip": true, + "decoded": 100000.0 + }, + { + "cbor": "+n9///8=", + "hex": "fa7f7fffff", + "roundtrip": true, + "decoded": 3.4028234663852886e+38 + }, + { + "cbor": "+3435DyIAHWc", + "hex": "fb7e37e43c8800759c", + "roundtrip": true, + "decoded": 1.0e+300 + }, + { + "cbor": "+QAB", + "hex": "f90001", 
+ "roundtrip": true, + "decoded": 5.960464477539063e-08 + }, + { + "cbor": "+QQA", + "hex": "f90400", + "roundtrip": true, + "decoded": 6.103515625e-05 + }, + { + "cbor": "+cQA", + "hex": "f9c400", + "roundtrip": true, + "decoded": -4.0 + }, + { + "cbor": "+8AQZmZmZmZm", + "hex": "fbc010666666666666", + "roundtrip": true, + "decoded": -4.1 + }, + { + "cbor": "+XwA", + "hex": "f97c00", + "roundtrip": true, + "diagnostic": "Infinity" + }, + { + "cbor": "+X4A", + "hex": "f97e00", + "roundtrip": true, + "diagnostic": "NaN" + }, + { + "cbor": "+fwA", + "hex": "f9fc00", + "roundtrip": true, + "diagnostic": "-Infinity" + }, + { + "cbor": "+n+AAAA=", + "hex": "fa7f800000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+n/AAAA=", + "hex": "fa7fc00000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+v+AAAA=", + "hex": "faff800000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "+3/wAAAAAAAA", + "hex": "fb7ff0000000000000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+3/4AAAAAAAA", + "hex": "fb7ff8000000000000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+//wAAAAAAAA", + "hex": "fbfff0000000000000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "9A==", + "hex": "f4", + "roundtrip": true, + "decoded": false + }, + { + "cbor": "9Q==", + "hex": "f5", + "roundtrip": true, + "decoded": true + }, + { + "cbor": "9g==", + "hex": "f6", + "roundtrip": true, + "decoded": null + }, + { + "cbor": "9w==", + "hex": "f7", + "roundtrip": true, + "diagnostic": "undefined" + }, + { + "cbor": "8A==", + "hex": "f0", + "roundtrip": true, + "diagnostic": "simple(16)" + }, + { + "cbor": "+Bg=", + "hex": "f818", + "roundtrip": true, + "diagnostic": "simple(24)" + }, + { + "cbor": "+P8=", + "hex": "f8ff", + "roundtrip": true, + "diagnostic": "simple(255)" + }, + { + "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", + "hex": "c074323031332d30332d32315432303a30343a30305a", + "roundtrip": true, + "diagnostic": "0(\"2013-03-21T20:04:00Z\")" + }, + { + "cbor": "wRpRS2ew", + "hex": "c11a514b67b0", + "roundtrip": true, + "diagnostic": "1(1363896240)" + }, + { + "cbor": "wftB1FLZ7CAAAA==", + "hex": "c1fb41d452d9ec200000", + "roundtrip": true, + "diagnostic": "1(1363896240.5)" + }, + { + "cbor": "10QBAgME", + "hex": "d74401020304", + "roundtrip": true, + "diagnostic": "23(h'01020304')" + }, + { + "cbor": "2BhFZElFVEY=", + "hex": "d818456449455446", + "roundtrip": true, + "diagnostic": "24(h'6449455446')" + }, + { + "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", + "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", + "roundtrip": true, + "diagnostic": "32(\"http://www.example.com\")" + }, + { + "cbor": "QA==", + "hex": "40", + "roundtrip": true, + "diagnostic": "h''" + }, + { + "cbor": "RAECAwQ=", + "hex": "4401020304", + "roundtrip": true, + "diagnostic": "h'01020304'" + }, + { + "cbor": "YA==", + "hex": "60", + "roundtrip": true, + "decoded": "" + }, + { + "cbor": "YWE=", + "hex": "6161", + "roundtrip": true, + "decoded": "a" + }, + { + "cbor": "ZElFVEY=", + "hex": "6449455446", + "roundtrip": true, + "decoded": "IETF" + }, + { + "cbor": "YiJc", + "hex": "62225c", + "roundtrip": true, + "decoded": "\"\\" + }, + { + "cbor": "YsO8", + "hex": "62c3bc", + "roundtrip": true, + "decoded": "ü" + }, + { + "cbor": "Y+awtA==", + "hex": "63e6b0b4", + "roundtrip": true, + "decoded": "水" + }, + { + "cbor": "ZPCQhZE=", + "hex": "64f0908591", + "roundtrip": true, + "decoded": "𐅑" + }, + { + "cbor": "gA==", + 
"hex": "80", + "roundtrip": true, + "decoded": [ + + ] + }, + { + "cbor": "gwECAw==", + "hex": "83010203", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3 + ] + }, + { + "cbor": "gwGCAgOCBAU=", + "hex": "8301820203820405", + "roundtrip": true, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", + "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "oA==", + "hex": "a0", + "roundtrip": true, + "decoded": { + } + }, + { + "cbor": "ogECAwQ=", + "hex": "a201020304", + "roundtrip": true, + "skip": true, + "diagnostic": "{1: 2, 3: 4}" + }, + { + "cbor": "omFhAWFiggID", + "hex": "a26161016162820203", + "roundtrip": true, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhoWFiYWM=", + "hex": "826161a161626163", + "roundtrip": true, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", + "hex": "a56161614161626142616361436164614461656145", + "roundtrip": true, + "decoded": { + "a": "A", + "b": "B", + "c": "C", + "d": "D", + "e": "E" + } + }, + { + "cbor": "X0IBAkMDBAX/", + "hex": "5f42010243030405ff", + "roundtrip": false, + "skip": true, + "diagnostic": "(_ h'0102', h'030405')" + }, + { + "cbor": "f2VzdHJlYWRtaW5n/w==", + "hex": "7f657374726561646d696e67ff", + "roundtrip": false, + "decoded": "streaming" + }, + { + "cbor": "n/8=", + "hex": "9fff", + "roundtrip": false, + "decoded": [ + + ] + }, + { + "cbor": "nwGCAgOfBAX//w==", + "hex": "9f018202039f0405ffff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwGCAgOCBAX/", + "hex": "9f01820203820405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGCAgOfBAX/", + "hex": "83018202039f0405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGfAgP/ggQF", + "hex": "83019f0203ff820405", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", + "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", + "roundtrip": false, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "v2FhAWFinwID//8=", + "hex": "bf61610161629f0203ffff", + "roundtrip": false, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhv2FiYWP/", + "hex": "826161bf61626163ff", + "roundtrip": false, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "v2NGdW71Y0FtdCH/", + "hex": "bf6346756ef563416d7421ff", + "roundtrip": false, + "decoded": { + "Fun": true, + "Amt": -2 + } + } +] diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py new file mode 100644 index 0000000..f983d9d --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/test.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +# This will create golden files in a directory passed to it. +# A Test calls this internally to create the golden files +# So it can process them (so we don't have to checkin the files). 
+ +# Ensure msgpack-python and cbor are installed first, using: +# sudo apt-get install python-dev +# sudo apt-get install python-pip +# pip install --user msgpack-python msgpack-rpc-python cbor + +# Ensure all "string" keys are utf strings (else encoded as bytes) + +import cbor, msgpack, msgpackrpc, sys, os, threading + +def get_test_data_list(): + # get list with all primitive types, and a combo type + l0 = [ + -8, + -1616, + -32323232, + -6464646464646464, + 192, + 1616, + 32323232, + 6464646464646464, + 192, + -3232.0, + -6464646464.0, + 3232.0, + 6464.0, + 6464646464.0, + False, + True, + u"null", + None, + u"some&day>some 0 + if stopTimeSec > 0: + def myStopRpcServer(): + server.stop() + t = threading.Timer(stopTimeSec, myStopRpcServer) + t.start() + server.start() + +def doRpcClientToPythonSvc(port): + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("Echo123", "A1", "B2", "C3") + print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doRpcClientToGoSvc(port): + # print ">>>> port: ", port, " <<<<<" + address = msgpackrpc.Address('localhost', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) + print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doMain(args): + if len(args) == 2 and args[0] == "testdata": + build_test_data(args[1]) + elif len(args) == 3 and args[0] == "rpc-server": + doRpcServer(int(args[1]), int(args[2])) + elif len(args) == 2 and args[0] == "rpc-client-python-service": + doRpcClientToPythonSvc(int(args[1])) + elif len(args) == 2 and args[0] == "rpc-client-go-service": + doRpcClientToGoSvc(int(args[1])) + else: + print("Usage: test.py " + + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") + +if __name__ == "__main__": + doMain(sys.argv[1:]) + diff --git a/vendor/github.com/ugorji/go/codec/tests.sh b/vendor/github.com/ugorji/go/codec/tests.sh new file mode 100644 index 0000000..fc9f5de --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/tests.sh @@ -0,0 +1,107 @@ +#!/bin/bash + +# Run all the different permutations of all the tests. +# This helps ensure that nothing gets broken. + +_run() { + # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i), + # binc-nosymbols (n), struct2array (s), intern string (e), + # json-indent (d), circular (l) + # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f) + # 3. OPTIONS: verbose (v), reset (z), must (m), + # + # Use combinations of mode to get exactly what you want, + # and then pass the variations you need. + + ztags="" + zargs="" + local OPTIND + OPTIND=1 + # "_xurtcinsvgzmefdl" === "_cdefgilmnrtsuvxz" + while getopts "_cdefgilmnrtsuvwxz" flag + do + case "x$flag" in + 'xr') ;; + 'xf') ztags="$ztags notfastpath" ;; + 'xg') ztags="$ztags codecgen" ;; + 'xx') ztags="$ztags x" ;; + 'xu') ztags="$ztags unsafe" ;; + 'xv') zargs="$zargs -tv" ;; + 'xz') zargs="$zargs -tr" ;; + 'xm') zargs="$zargs -tm" ;; + 'xl') zargs="$zargs -tl" ;; + 'xw') zargs="$zargs -tx=10" ;; + *) ;; + esac + done + # shift $((OPTIND-1)) + printf '............. 
TAGS: %s .............\n' "$ztags" + # echo ">>>>>>> TAGS: $ztags" + + OPTIND=1 + while getopts "_cdefgilmnrtsuvwxz" flag + do + case "x$flag" in + 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;; + 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;; + 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;; + 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;; + 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;; + 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;; + 'xd') printf ">>>>>>> INDENT : "; + go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs; + go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs; + sleep 2 ;; + *) ;; + esac + done + shift $((OPTIND-1)) + + OPTIND=1 +} + +# echo ">>>>>>> RUNNING VARIATIONS OF TESTS" +if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then + # All: r, x, g, gu + _run "-_tcinsed_ml" # regular + _run "-_tcinsed_ml_z" # regular with reset + _run "-w_tcinsed_ml" # regular with max init len + _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath) + _run "-x_tcinsed_ml" # external + _run "-gx_tcinsed_ml" # codecgen: requires external + _run "-gxu_tcinsed_ml" # codecgen + unsafe +elif [[ "x$@" = "x-Z" ]]; then + # Regular + _run "-_tcinsed_ml" # regular + _run "-_tcinsed_ml_z" # regular with reset +elif [[ "x$@" = "x-F" ]]; then + # regular with notfastpath + _run "-_tcinsed_ml_f" # regular + _run "-_tcinsed_ml_zf" # regular with reset +elif [[ "x$@" = "x-C" ]]; then + # codecgen + _run "-gx_tcinsed_ml" # codecgen: requires external + _run "-gxu_tcinsed_ml" # codecgen + unsafe + _run "-gxuw_tcinsed_ml" # codecgen + unsafe + maxinitlen +elif [[ "x$@" = "x-X" ]]; then + # external + _run "-x_tcinsed_ml" # external +elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then + cat <= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. +func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. 
+ // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +func timeLocUTCName(tzint int16) string { + if tzint == 0 { + return "UTC" + } + var tzname = []byte("UTC+00:00") + //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. + //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first + var tzhr, tzmin int16 + if tzint < 0 { + tzname[3] = '-' // (TODO: verify. this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/vendor/github.com/vmihailenco/msgpack/.travis.yml b/vendor/github.com/vmihailenco/msgpack/.travis.yml new file mode 100644 index 0000000..0c2f74e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go + +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get gopkg.in/check.v1 diff --git a/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md new file mode 100644 index 0000000..9a4f38a --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md @@ -0,0 +1,24 @@ +## 3.4 + +- Encode, Decode, Marshal, and Unmarshal are changed to accept single argument. EncodeMulti and DecodeMulti are added as replacement. +- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. +- Encoder changed to preserve type of numbers instead of chosing most compact encoding. The old behavior can be achieved with Encoder.UseCompactEncoding. + +## v3.3 + +- `msgpack:",inline"` tag is restored to force inlining structs. + +## v3.2 + +- Decoding extension types returns pointer to the value instead of the value. Fixes #153 + +## v3 + +- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. +- Msgpack maps are decoded into map[string]interface{} by default. +- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of DecodeArrayLen. +- Embedded structs are automatically inlined where possible. +- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old format is supported as well. +- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. There should be no performance differences. +- DecodeInterface can now return int8/16/32 and uint8/16/32. +- PeekCode returns codes.Code instead of byte. 
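A minimal sketch of the 3.4-era API changes listed in the changelog above: single-argument Marshal/Unmarshal, with EncodeMulti/DecodeMulti for multiple values. The variadic signatures for EncodeMulti and DecodeMulti are assumptions inferred from the changelog wording, not taken from the vendored source.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack"
)

func main() {
	// Single-argument Marshal/Unmarshal (the 3.4 form described in the changelog).
	b, err := msgpack.Marshal(map[string]int{"answer": 42})
	if err != nil {
		panic(err)
	}
	var m map[string]int
	if err := msgpack.Unmarshal(b, &m); err != nil {
		panic(err)
	}
	fmt.Println(m["answer"])

	// Multiple values go through EncodeMulti/DecodeMulti (assumed variadic,
	// per the "added as replacement" changelog entry above).
	var buf bytes.Buffer
	if err := msgpack.NewEncoder(&buf).EncodeMulti("foo", int64(7)); err != nil {
		panic(err)
	}
	var s string
	var n int64
	if err := msgpack.NewDecoder(&buf).DecodeMulti(&s, &n); err != nil {
		panic(err)
	}
	fmt.Println(s, n)
}
```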
diff --git a/vendor/github.com/vmihailenco/msgpack/LICENSE b/vendor/github.com/vmihailenco/msgpack/LICENSE new file mode 100644 index 0000000..b749d07 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/msgpack/Makefile b/vendor/github.com/vmihailenco/msgpack/Makefile new file mode 100644 index 0000000..b62ae6a --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/Makefile @@ -0,0 +1,5 @@ +all: + go test ./... + env GOOS=linux GOARCH=386 go test ./... + go test ./... -short -race + go vet diff --git a/vendor/github.com/vmihailenco/msgpack/README.md b/vendor/github.com/vmihailenco/msgpack/README.md new file mode 100644 index 0000000..0c75ae1 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/README.md @@ -0,0 +1,69 @@ +# MessagePack encoding for Golang + +[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg?branch=v2)](https://travis-ci.org/vmihailenco/msgpack) +[![GoDoc](https://godoc.org/github.com/vmihailenco/msgpack?status.svg)](https://godoc.org/github.com/vmihailenco/msgpack) + +Supports: +- Primitives, arrays, maps, structs, time.Time and interface{}. +- Appengine *datastore.Key and datastore.Cursor. +- [CustomEncoder](https://godoc.org/github.com/vmihailenco/msgpack#example-CustomEncoder)/CustomDecoder interfaces for custom encoding. +- [Extensions](https://godoc.org/github.com/vmihailenco/msgpack#example-RegisterExt) to encode type information. +- Renaming fields via `msgpack:"my_field_name"`. +- Omitting individual empty fields via `msgpack:",omitempty"` tag or all [empty fields in a struct](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--OmitEmpty). +- [Map keys sorting](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.SortMapKeys). +- Encoding/decoding all [structs as arrays](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.StructAsArray) or [individual structs](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--AsArray). 
+- [Encoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseJSONTag) with [Decoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Decoder.UseJSONTag) can turn msgpack into drop-in replacement for JSON. +- Simple but very fast and efficient [queries](https://godoc.org/github.com/vmihailenco/msgpack#example-Decoder-Query). + +API docs: https://godoc.org/github.com/vmihailenco/msgpack. +Examples: https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples. + +## Installation + +Install: + +```shell +go get -u github.com/vmihailenco/msgpack +``` + +## Quickstart + +```go +func ExampleMarshal() { + type Item struct { + Foo string + } + + b, err := msgpack.Marshal(&Item{Foo: "bar"}) + if err != nil { + panic(err) + } + + var item Item + err = msgpack.Unmarshal(b, &item) + if err != nil { + panic(err) + } + fmt.Println(item.Foo) + // Output: bar +} +``` + +## Benchmark + +``` +BenchmarkStructVmihailencoMsgpack-4 200000 12814 ns/op 2128 B/op 26 allocs/op +BenchmarkStructUgorjiGoMsgpack-4 100000 17678 ns/op 3616 B/op 70 allocs/op +BenchmarkStructUgorjiGoCodec-4 100000 19053 ns/op 7346 B/op 23 allocs/op +BenchmarkStructJSON-4 20000 69438 ns/op 7864 B/op 26 allocs/op +BenchmarkStructGOB-4 10000 104331 ns/op 14664 B/op 278 allocs/op +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples) to get an idea how to use this package. + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/vendor/github.com/vmihailenco/msgpack/appengine.go b/vendor/github.com/vmihailenco/msgpack/appengine.go new file mode 100644 index 0000000..e8e91e5 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/appengine.go @@ -0,0 +1,64 @@ +// +build appengine + +package msgpack + +import ( + "reflect" + + ds "google.golang.org/appengine/datastore" +) + +func init() { + Register((*ds.Key)(nil), encodeDatastoreKeyValue, decodeDatastoreKeyValue) + Register((*ds.Cursor)(nil), encodeDatastoreCursorValue, decodeDatastoreCursorValue) +} + +func EncodeDatastoreKey(e *Encoder, key *ds.Key) error { + if key == nil { + return e.EncodeNil() + } + return e.EncodeString(key.Encode()) +} + +func encodeDatastoreKeyValue(e *Encoder, v reflect.Value) error { + key := v.Interface().(*ds.Key) + return EncodeDatastoreKey(e, key) +} + +func DecodeDatastoreKey(d *Decoder) (*ds.Key, error) { + v, err := d.DecodeString() + if err != nil { + return nil, err + } + if v == "" { + return nil, nil + } + return ds.DecodeKey(v) +} + +func decodeDatastoreKeyValue(d *Decoder, v reflect.Value) error { + key, err := DecodeDatastoreKey(d) + if err != nil { + return err + } + v.Set(reflect.ValueOf(key)) + return nil +} + +func encodeDatastoreCursorValue(e *Encoder, v reflect.Value) error { + cursor := v.Interface().(ds.Cursor) + return e.Encode(cursor.String()) +} + +func decodeDatastoreCursorValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + cursor, err := ds.DecodeCursor(s) + if err != nil { + return err + } + v.Set(reflect.ValueOf(cursor)) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/codes/codes.go b/vendor/github.com/vmihailenco/msgpack/codes/codes.go new file mode 100644 index 0000000..28e0a5a --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/codes/codes.go @@ -0,0 +1,90 @@ +package codes + +type Code byte + +var ( + PosFixedNumHigh Code = 0x7f + NegFixedNumLow Code = 0xe0 + + 
Nil Code = 0xc0 + + False Code = 0xc2 + True Code = 0xc3 + + Float Code = 0xca + Double Code = 0xcb + + Uint8 Code = 0xcc + Uint16 Code = 0xcd + Uint32 Code = 0xce + Uint64 Code = 0xcf + + Int8 Code = 0xd0 + Int16 Code = 0xd1 + Int32 Code = 0xd2 + Int64 Code = 0xd3 + + FixedStrLow Code = 0xa0 + FixedStrHigh Code = 0xbf + FixedStrMask Code = 0x1f + Str8 Code = 0xd9 + Str16 Code = 0xda + Str32 Code = 0xdb + + Bin8 Code = 0xc4 + Bin16 Code = 0xc5 + Bin32 Code = 0xc6 + + FixedArrayLow Code = 0x90 + FixedArrayHigh Code = 0x9f + FixedArrayMask Code = 0xf + Array16 Code = 0xdc + Array32 Code = 0xdd + + FixedMapLow Code = 0x80 + FixedMapHigh Code = 0x8f + FixedMapMask Code = 0xf + Map16 Code = 0xde + Map32 Code = 0xdf + + FixExt1 Code = 0xd4 + FixExt2 Code = 0xd5 + FixExt4 Code = 0xd6 + FixExt8 Code = 0xd7 + FixExt16 Code = 0xd8 + Ext8 Code = 0xc7 + Ext16 Code = 0xc8 + Ext32 Code = 0xc9 +) + +func IsFixedNum(c Code) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c Code) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c Code) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c Code) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c Code) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsBin(c Code) bool { + return c == Bin8 || c == Bin16 || c == Bin32 +} + +func IsFixedExt(c Code) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c Code) bool { + return IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode.go b/vendor/github.com/vmihailenco/msgpack/decode.go new file mode 100644 index 0000000..2edfa9c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode.go @@ -0,0 +1,547 @@ +package msgpack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +const bytesAllocLimit = 1024 * 1024 // 1mb + +type bufReader interface { + io.Reader + io.ByteScanner +} + +func newBufReader(r io.Reader) bufReader { + if br, ok := r.(bufReader); ok { + return br + } + return bufio.NewReader(r) +} + +func makeBuffer() []byte { + return make([]byte, 0, 64) +} + +// Unmarshal decodes the MessagePack-encoded data and stores the result +// in the value pointed to by v. +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +type Decoder struct { + r io.Reader + s io.ByteScanner + buf []byte + + extLen int + rec []byte // accumulates read data if not nil + + useLoose bool + useJSONTag bool + + decodeMapFunc func(*Decoder) (interface{}, error) +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the MessagePack values requested. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + buf: makeBuffer(), + } + d.resetReader(r) + return d +} + +func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { + d.decodeMapFunc = fn +} + +// UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. +func (d *Decoder) UseDecodeInterfaceLoose(flag bool) *Decoder { + d.useLoose = flag + return d +} + +// UseJSONTag causes the Decoder to use json struct tag as fallback option +// if there is no msgpack tag. 
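+//
+// Illustrative usage (editor's sketch, not part of the upstream file; r is an
+// assumed io.Reader): with UseJSONTag enabled, a field carrying only a
+// `json:"name"` tag is matched by the msgpack key "name", so no duplicate
+// msgpack tag is needed:
+//
+//	var u struct {
+//		Name string `json:"name"`
+//	}
+//	err := msgpack.NewDecoder(r).UseJSONTag(true).Decode(&u)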
+func (d *Decoder) UseJSONTag(v bool) *Decoder { + d.useJSONTag = v + return d +} + +func (d *Decoder) Reset(r io.Reader) error { + d.resetReader(r) + return nil +} + +func (d *Decoder) resetReader(r io.Reader) { + reader := newBufReader(r) + d.r = reader + d.s = reader +} + +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + vv = vv.Elem() + if !vv.IsValid() { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + return d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.useLoose { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != codes.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c codes.Code) (bool, error) { + if c == codes.False { + return false, nil + } + if c == codes.True { + return true, nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +// DecodeInterface decodes value into interface. 
It returns following types: +// - nil, +// - bool, +// - int8, int16, int32, int64, +// - uint8, uint16, uint32, uint64, +// - float32 and float64, +// - string, +// - []byte, +// - slices of any of the above, +// - maps of any of the above. +// +// DecodeInterface should be used only when you don't know the type of value +// you are decoding. For example, if you are decoding number it is better to use +// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. +func (d *Decoder) DecodeInterface() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int8(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float: + return d.float32(c) + case codes.Double: + return d.float64(c) + case codes.Uint8: + return d.uint8() + case codes.Uint16: + return d.uint16() + case codes.Uint32: + return d.uint32() + case codes.Uint64: + return d.uint64() + case codes.Int8: + return d.int8() + case codes.Int16: + return d.int16() + case codes.Int32: + return d.int32() + case codes.Int64: + return d.int64() + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// DecodeInterfaceLoose is like DecodeInterface except that: +// - int8, int16, and int32 are converted to int64, +// - uint8, uint16, and uint32 are converted to uint64, +// - float32 is converted to float64. +func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int64(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float, codes.Double: + return d.float64(c) + case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: + return d.uint(c) + case codes.Int8, codes.Int16, codes.Int32, codes.Int64: + return d.int(c) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. 
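+//
+// For example (editor's sketch, not part of the upstream file; r and dst are
+// assumed), to decode only the second value in a stream without materializing
+// the first:
+//
+//	dec := msgpack.NewDecoder(r)
+//	_ = dec.Skip()          // discard the first value, whatever its type
+//	err := dec.Decode(&dst) // decode the one after it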
+func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if codes.IsFixedNum(c) { + return nil + } else if codes.IsFixedMap(c) { + return d.skipMap(c) + } else if codes.IsFixedArray(c) { + return d.skipSlice(c) + } else if codes.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case codes.Nil, codes.False, codes.True: + return nil + case codes.Uint8, codes.Int8: + return d.skipN(1) + case codes.Uint16, codes.Int16: + return d.skipN(2) + case codes.Uint32, codes.Int32, codes.Float: + return d.skipN(4) + case codes.Uint64, codes.Int64, codes.Double: + return d.skipN(8) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.skipBytes(c) + case codes.Str8, codes.Str16, codes.Str32: + return d.skipBytes(c) + case codes.Array16, codes.Array32: + return d.skipSlice(c) + case codes.Map16, codes.Map32: + return d.skipMap(c) + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes contains list of available codes. +func (d *Decoder) PeekCode() (codes.Code, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return codes.Code(c), d.s.UnreadByte() +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == codes.Nil +} + +func (d *Decoder) readCode() (codes.Code, error) { + d.extLen = 0 + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return codes.Code(c), nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + buf, err := readN(d.r, d.buf, n) + if err != nil { + return nil, err + } + d.buf = buf + if d.rec != nil { + d.rec = append(d.rec, buf...) + } + return buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + if n <= bytesAllocLimit { + b = make([]byte, n) + } else { + b = make([]byte, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := n - len(b) + if alloc > bytesAllocLimit { + alloc = bytesAllocLimit + } + b = append(b, make([]byte, alloc)...) 
+ + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return nil, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_map.go b/vendor/github.com/vmihailenco/msgpack/decode_map.go new file mode 100644 index 0000000..2a3d3ec --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_map.go @@ -0,0 +1,339 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const mapElemsAllocLimit = 1e4 + +var mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) +var mapStringStringType = mapStringStringPtrType.Elem() + +var mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) +var mapStringInterfaceType = mapStringInterfacePtrType.Elem() + +var errInvalidCode = errors.New("invalid code") + +func decodeMapValue(d *Decoder, v reflect.Value) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if size == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + v.Set(reflect.MakeMap(typ)) + } + if size == 0 { + return nil + } + + return decodeMapValueSize(d, v, size) +} + +func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { + typ := v.Type() + keyType := typ.Key() + valueType := typ.Elem() + + for i := 0; i < size; i++ { + mk := reflect.New(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := reflect.New(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. +func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if codes.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c codes.Code) (int, error) { + size, err := d._mapLen(c) + err = expandInvalidCodeMapLenError(c, err) + return size, err +} + +func (d *Decoder) _mapLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { + return int(c & codes.FixedMapMask), nil + } + if c == codes.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == codes.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, errInvalidCode +} + +func expandInvalidCodeMapLenError(c codes.Code, err error) error { + if err == errInvalidCode { + return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) + } + return err +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]string, min(size, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v 
reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]interface{}, min(n, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func (d *Decoder) DecodeMap() (interface{}, error) { + if d.decodeMapFunc != nil { + return d.decodeMapFunc(d) + } + + size, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if size == -1 { + return nil, nil + } + if size == 0 { + return make(map[string]interface{}), nil + } + + code, err := d.PeekCode() + if err != nil { + return nil, err + } + + if codes.IsString(code) || codes.IsBin(code) { + return d.decodeMapStringInterfaceSize(size) + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + + mapType := reflect.MapOf(keyType, valueType) + mapValue := reflect.MakeMap(mapType) + + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + size-- + + err = decodeMapValueSize(d, mapValue, size) + if err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { + m := make(map[string]interface{}, min(size, mapElemsAllocLimit)) + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + return m, nil +} + +func (d *Decoder) skipMap(c codes.Code) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + var isArray bool + + n, err := d._mapLen(c) + if err != nil { + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return expandInvalidCodeMapLenError(c, err) + } + isArray = true + } + if n == -1 { + if err = mustSet(v); err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil + } + + var fields *fields + if d.useJSONTag { + fields = jsonStructs.Fields(v.Type()) + } else { + fields = structs.Fields(v.Type()) + } + + if isArray { + for i, f := range fields.List { + if i >= n { + break + } + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + // Skip extra values. 
+ for i := len(fields.List); i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil + } + + for i := 0; i < n; i++ { + name, err := d.DecodeString() + if err != nil { + return err + } + if f := fields.Table[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } else { + if err := d.Skip(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_number.go b/vendor/github.com/vmihailenco/msgpack/decode_number.go new file mode 100644 index 0000000..15019cc --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_number.go @@ -0,0 +1,307 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return uint8(c), nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. +func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c codes.Code) (uint64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return uint64(n), err + case codes.Int8: + n, err := d.int8() + return uint64(n), err + case codes.Uint16: + n, err := d.uint16() + return uint64(n), err + case codes.Int16: + n, err := d.int16() + return uint64(n), err + case codes.Uint32: + n, err := d.uint32() + return uint64(n), err + case codes.Int32: + n, err := d.int32() + return uint64(n), err + case codes.Uint64, codes.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. 
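+//
+// For example (editor's sketch, not part of the upstream file; buf is an
+// assumed bytes.Buffer), a value written with EncodeUint8 can still be read
+// back through DecodeInt64:
+//
+//	_ = msgpack.NewEncoder(&buf).EncodeUint8(7)
+//	n, err := msgpack.NewDecoder(&buf).DecodeInt64() // n == 7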
+func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c codes.Code) (int64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return int64(n), err + case codes.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case codes.Uint16: + n, err := d.uint16() + return int64(n), err + case codes.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case codes.Uint32: + n, err := d.uint32() + return int64(n), err + case codes.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case codes.Uint64, codes.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c codes.Code) (float32, error) { + if c == codes.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. +func (d *Decoder) DecodeFloat64() (float64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float64(c) +} + +func (d *Decoder) float64(c codes.Code) (float64, error) { + switch c { + case codes.Float: + n, err := d.float32(c) + if err != nil { + return 0, err + } + return float64(n), nil + case codes.Double: + n, err := d.uint64() + if err != nil { + return 0, err + } + return math.Float64frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float64(n), nil +} + +func (d *Decoder) DecodeUint() (uint, error) { + n, err := d.DecodeUint64() + return uint(n), err +} + +func (d *Decoder) DecodeUint8() (uint8, error) { + n, err := d.DecodeUint64() + return uint8(n), err +} + +func (d *Decoder) DecodeUint16() (uint16, error) { + n, err := d.DecodeUint64() + return uint16(n), err +} + +func (d *Decoder) DecodeUint32() (uint32, error) { + n, err := d.DecodeUint64() + return uint32(n), err +} + +func (d *Decoder) DecodeInt() (int, error) { + n, err := d.DecodeInt64() + return int(n), err +} + +func (d *Decoder) DecodeInt8() (int8, error) { + n, err := d.DecodeInt64() + return int8(n), err +} + +func (d *Decoder) DecodeInt16() (int16, error) { + n, err := d.DecodeInt64() + return int16(n), err +} + +func (d *Decoder) DecodeInt32() (int32, error) { + n, err := d.DecodeInt64() + return int32(n), err +} + +func decodeFloat32Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat32() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(float64(f)) + return nil +} + +func decodeFloat64Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(f) + return nil +} + +func decodeInt64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeInt64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetInt(n) + return nil +} + +func decodeUint64Value(d *Decoder, v 
reflect.Value) error { + n, err := d.DecodeUint64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetUint(n) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_query.go b/vendor/github.com/vmihailenco/msgpack/decode_query.go new file mode 100644 index 0000000..d680be8 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_query.go @@ -0,0 +1,158 @@ +package msgpack + +import ( + "fmt" + "strconv" + "strings" + + "github.com/vmihailenco/msgpack/codes" +) + +type queryResult struct { + query string + key string + hasAsterisk bool + + values []interface{} +} + +func (q *queryResult) nextKey() { + ind := strings.IndexByte(q.query, '.') + if ind == -1 { + q.key = q.query + q.query = "" + return + } + q.key = q.query[:ind] + q.query = q.query[ind+1:] +} + +// Query extracts data specified by the query from the msgpack stream skipping +// any other data. Query consists of map keys and array indexes separated with dot, +// e.g. key1.0.key2. +func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): + err = d.queryMapKey(q) + case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + k, err := d.bytesNoCopy() + if err != nil { + return err + } + + if string(k) == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/decode_slice.go new file mode 100644 index 0000000..778d80b --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_slice.go @@ -0,0 +1,193 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const sliceElemsAllocLimit = 1e4 + +var 
sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. +func (d *Decoder) DecodeArrayLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.arrayLen(c) +} + +func (d *Decoder) arrayLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { + return int(c & codes.FixedArrayMask), nil + } + switch c { + case codes.Array16: + n, err := d.uint16() + return int(n), err + case codes.Array32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) +} + +func decodeStringSliceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) + return d.decodeStringSlicePtr(ptr) +} + +func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + ss := setStringsCap(*ptr, n) + for i := 0; i < n; i++ { + s, err := d.DecodeString() + if err != nil { + return err + } + ss = append(ss, s) + } + *ptr = ss + + return nil +} + +func setStringsCap(s []string, n int) []string { + if n > sliceElemsAllocLimit { + n = sliceElemsAllocLimit + } + + if s == nil { + return make([]string, 0, n) + } + + if cap(s) >= n { + return s[:0] + } + + s = s[:cap(s)] + s = append(s, make([]string, n-len(s))...) + return s[:0] +} + +func decodeSliceValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + if n == 0 && v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + return nil + } + + if v.Cap() >= n { + v.Set(v.Slice(0, n)) + } else if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Cap())) + } + + for i := 0; i < n; i++ { + if i >= v.Len() { + v.Set(growSliceValue(v, n)) + } + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func growSliceValue(v reflect.Value, n int) reflect.Value { + diff := n - v.Len() + if diff > sliceElemsAllocLimit { + diff = sliceElemsAllocLimit + } + v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) + return v +} + +func decodeArrayValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + return nil + } + + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + for i := 0; i < n; i++ { + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) DecodeSlice() ([]interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.decodeSlice(c) +} + +func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { + n, err := d.arrayLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + + s := make([]interface{}, 0, min(n, sliceElemsAllocLimit)) + for i := 0; i < n; i++ { + v, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + s = append(s, v) + } + + return s, nil +} + +func (d *Decoder) skipSlice(c codes.Code) error { + n, err := d.arrayLen(c) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git 
a/vendor/github.com/vmihailenco/msgpack/decode_string.go b/vendor/github.com/vmihailenco/msgpack/decode_string.go new file mode 100644 index 0000000..5402022 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_string.go @@ -0,0 +1,175 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) bytesLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if codes.IsFixedString(c) { + return int(c & codes.FixedStrMask), nil + } + switch c { + case codes.Str8, codes.Bin8: + n, err := d.uint8() + return int(n), err + case codes.Str16, codes.Bin16: + n, err := d.uint16() + return int(n), err + case codes.Str32, codes.Bin32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c codes.Code) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + if n == -1 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) bytesNoCopy() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return d.readN(n) +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c codes.Code) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + if err = mustSet(v); err != nil { + return err + } + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_value.go b/vendor/github.com/vmihailenco/msgpack/decode_value.go new file mode 100644 index 0000000..7b858b5 --- /dev/null +++ 
b/vendor/github.com/vmihailenco/msgpack/decode_value.go @@ -0,0 +1,234 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" +) + +var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() +var stringType = reflect.TypeOf((*string)(nil)).Elem() + +var valueDecoders []decoderFunc + +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func mustSet(v reflect.Value) error { + if !v.CanSet() { + return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) + } + return nil +} + +func getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + decoder, ok := typDecMap[typ] + if ok { + return decoder + } + + if typ.Implements(customDecoderType) { + return decodeCustomValue + } + if typ.Implements(unmarshalerType) { + return unmarshalValue + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return decodeCustomValueAddr + } + if ptr.Implements(unmarshalerType) { + return unmarshalValueAddr + } + } + + switch kind { + case reflect.Ptr: + return ptrDecoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + switch elem.Kind() { + case reflect.Uint8: + return decodeBytesValue + } + switch elem { + case stringType: + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + return valueDecoders[kind] +} + +func ptrDecoderFunc(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if err := mustSet(v); err != nil { + return err + } + if !v.IsNil() { + v.Set(reflect.Zero(v.Type())) + } + return d.DecodeNil() + } + if v.IsNil() { + if err := mustSet(v); err != nil { + return err + } + v.Set(reflect.New(v.Type().Elem())) + } + return decoder(d, v.Elem()) + } +} + +func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return decodeCustomValue(d, v.Addr()) +} + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + 
return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalValue(d, v.Addr()) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + if d.extLen != 0 { + b, err := d.readN(d.extLen) + if err != nil { + return err + } + d.rec = b + } else { + d.rec = makeBuffer() + if err := d.Skip(); err != nil { + return err + } + } + + unmarshaler := v.Interface().(Unmarshaler) + err := unmarshaler.UnmarshalMsgpack(d.rec) + d.rec = nil + return err +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + + elem := v.Elem() + if !elem.CanAddr() { + if d.hasNilCode() { + v.Set(reflect.Zero(v.Type())) + return d.DecodeNil() + } + } + + return d.DecodeValue(elem) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode.go b/vendor/github.com/vmihailenco/msgpack/encode.go new file mode 100644 index 0000000..c2bb23c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode.go @@ -0,0 +1,170 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +type writer interface { + io.Writer + WriteByte(byte) error + WriteString(string) (int, error) +} + +type byteWriter struct { + io.Writer + + buf []byte + bootstrap [64]byte +} + +func newByteWriter(w io.Writer) *byteWriter { + bw := &byteWriter{ + Writer: w, + } + bw.buf = bw.bootstrap[:] + return bw +} + +func (w *byteWriter) WriteByte(c byte) error { + w.buf = w.buf[:1] + w.buf[0] = c + _, err := w.Write(w.buf) + return err +} + +func (w *byteWriter) WriteString(s string) (int, error) { + w.buf = append(w.buf[:0], s...) + return w.Write(w.buf) +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(v) + return buf.Bytes(), err +} + +type Encoder struct { + w writer + buf []byte + + sortMapKeys bool + structAsArray bool + useJSONTag bool + useCompact bool +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + bw, ok := w.(writer) + if !ok { + bw = newByteWriter(w) + } + return &Encoder{ + w: bw, + buf: make([]byte, 9), + } +} + +// SortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]interface{} +func (e *Encoder) SortMapKeys(flag bool) *Encoder { + e.sortMapKeys = flag + return e +} + +// StructAsArray causes the Encoder to encode Go structs as MessagePack arrays. +func (e *Encoder) StructAsArray(flag bool) *Encoder { + e.structAsArray = flag + return e +} + +// UseJSONTag causes the Encoder to use json struct tag as fallback option +// if there is no msgpack tag. 
+func (e *Encoder) UseJSONTag(flag bool) *Encoder { + e.useJSONTag = flag + return e +} + +// UseCompactEncoding causes the Encoder to chose the most compact encoding. +// For example, it allows to encode Go int64 as msgpack int8 saving 7 bytes. +func (e *Encoder) UseCompactEncoding(flag bool) *Encoder { + e.useCompact = flag + return e +} + +func (e *Encoder) Encode(v interface{}) error { + switch v := v.(type) { + case nil: + return e.EncodeNil() + case string: + return e.EncodeString(v) + case []byte: + return e.EncodeBytes(v) + case int: + return e.encodeInt64Cond(int64(v)) + case int64: + return e.encodeInt64Cond(v) + case uint: + return e.encodeUint64Cond(uint64(v)) + case uint64: + return e.encodeUint64Cond(v) + case bool: + return e.EncodeBool(v) + case float32: + return e.EncodeFloat32(v) + case float64: + return e.EncodeFloat64(v) + case time.Duration: + return e.encodeInt64Cond(int64(v)) + case time.Time: + return e.EncodeTime(v) + } + return e.EncodeValue(reflect.ValueOf(v)) +} + +func (e *Encoder) EncodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := e.Encode(vv); err != nil { + return err + } + } + return nil +} + +func (e *Encoder) EncodeValue(v reflect.Value) error { + fn := getEncoder(v.Type()) + return fn(e, v) +} + +func (e *Encoder) EncodeNil() error { + return e.writeCode(codes.Nil) +} + +func (e *Encoder) EncodeBool(value bool) error { + if value { + return e.writeCode(codes.True) + } + return e.writeCode(codes.False) +} + +func (e *Encoder) writeCode(c codes.Code) error { + return e.w.WriteByte(byte(c)) +} + +func (e *Encoder) write(b []byte) error { + _, err := e.w.Write(b) + return err +} + +func (e *Encoder) writeString(s string) error { + _, err := e.w.WriteString(s) + return err +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_map.go b/vendor/github.com/vmihailenco/msgpack/encode_map.go new file mode 100644 index 0000000..a87c407 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_map.go @@ -0,0 +1,172 @@ +package msgpack + +import ( + "reflect" + "sort" + + "github.com/vmihailenco/msgpack/codes" +) + +func encodeMapValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + for _, key := range v.MapKeys() { + if err := e.EncodeValue(key); err != nil { + return err + } + if err := e.EncodeValue(v.MapIndex(key)); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringStringType).Interface().(map[string]string) + if e.sortMapKeys { + return e.encodeSortedMapStringString(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeString(mv); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) + if e.sortMapKeys { + return e.encodeSortedMapStringInterface(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.Encode(mv); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringString(m map[string]string) 
error { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeString(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.Encode(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) EncodeMapLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedMapLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Map16, uint16(l)) + } + return e.write4(codes.Map32, uint32(l)) +} + +func encodeStructValue(e *Encoder, strct reflect.Value) error { + var structFields *fields + if e.useJSONTag { + structFields = jsonStructs.Fields(strct.Type()) + } else { + structFields = structs.Fields(strct.Type()) + } + + if e.structAsArray || structFields.AsArray { + return encodeStructValueAsArray(e, strct, structFields.List) + } + fields := structFields.OmitEmpty(strct) + + if err := e.EncodeMapLen(len(fields)); err != nil { + return err + } + + for _, f := range fields { + if err := e.EncodeString(f.name); err != nil { + return err + } + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + + return nil +} + +func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error { + if err := e.EncodeArrayLen(len(fields)); err != nil { + return err + } + for _, f := range fields { + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_number.go b/vendor/github.com/vmihailenco/msgpack/encode_number.go new file mode 100644 index 0000000..dd7db6f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_number.go @@ -0,0 +1,230 @@ +package msgpack + +import ( + "math" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +// EncodeUint8 encodes an uint8 in 2 bytes preserving type of the number. +func (e *Encoder) EncodeUint8(n uint8) error { + return e.write1(codes.Uint8, n) +} + +func (e *Encoder) encodeUint8Cond(n uint8) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint8(n) +} + +// EncodeUint16 encodes an uint16 in 3 bytes preserving type of the number. +func (e *Encoder) EncodeUint16(n uint16) error { + return e.write2(codes.Uint16, n) +} + +func (e *Encoder) encodeUint16Cond(n uint16) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint16(n) +} + +// EncodeUint32 encodes an uint16 in 5 bytes preserving type of the number. +func (e *Encoder) EncodeUint32(n uint32) error { + return e.write4(codes.Uint32, n) +} + +func (e *Encoder) encodeUint32Cond(n uint32) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint32(n) +} + +// EncodeUint64 encodes an uint16 in 9 bytes preserving type of the number. +func (e *Encoder) EncodeUint64(n uint64) error { + return e.write8(codes.Uint64, n) +} + +func (e *Encoder) encodeUint64Cond(n uint64) error { + if e.useCompact { + return e.EncodeUint(n) + } + return e.EncodeUint64(n) +} + +// EncodeInt8 encodes an int8 in 2 bytes preserving type of the number. 
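+//
+// For example (editor's sketch, not part of the upstream file): EncodeInt8(1)
+// writes the two bytes 0xd0 0x01, preserving the int8 type on the wire,
+// whereas the compact EncodeInt(1) writes the single positive fixint byte 0x01.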
+func (e *Encoder) EncodeInt8(n int8) error { + return e.write1(codes.Int8, uint8(n)) +} + +func (e *Encoder) encodeInt8Cond(n int8) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt8(n) +} + +// EncodeInt16 encodes an int16 in 3 bytes preserving type of the number. +func (e *Encoder) EncodeInt16(n int16) error { + return e.write2(codes.Int16, uint16(n)) +} + +func (e *Encoder) encodeInt16Cond(n int16) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt16(n) +} + +// EncodeInt32 encodes an int32 in 5 bytes preserving type of the number. +func (e *Encoder) EncodeInt32(n int32) error { + return e.write4(codes.Int32, uint32(n)) +} + +func (e *Encoder) encodeInt32Cond(n int32) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt32(n) +} + +// EncodeInt64 encodes an int64 in 9 bytes preserving type of the number. +func (e *Encoder) EncodeInt64(n int64) error { + return e.write8(codes.Int64, uint64(n)) +} + +func (e *Encoder) encodeInt64Cond(n int64) error { + if e.useCompact { + return e.EncodeInt(n) + } + return e.EncodeInt64(n) +} + +// EncodeUnsignedNumber encodes an uint64 in 1, 2, 3, 5, or 9 bytes. +// Type of the number is lost during encoding. +func (e *Encoder) EncodeUint(n uint64) error { + if n <= math.MaxInt8 { + return e.w.WriteByte(byte(n)) + } + if n <= math.MaxUint8 { + return e.EncodeUint8(uint8(n)) + } + if n <= math.MaxUint16 { + return e.EncodeUint16(uint16(n)) + } + if n <= math.MaxUint32 { + return e.EncodeUint32(uint32(n)) + } + return e.EncodeUint64(uint64(n)) +} + +// EncodeNumber encodes an int64 in 1, 2, 3, 5, or 9 bytes. +// Type of number is lost during encoding. +func (e *Encoder) EncodeInt(n int64) error { + if n >= 0 { + return e.EncodeUint(uint64(n)) + } + if n >= int64(int8(codes.NegFixedNumLow)) { + return e.w.WriteByte(byte(n)) + } + if n >= math.MinInt8 { + return e.EncodeInt8(int8(n)) + } + if n >= math.MinInt16 { + return e.EncodeInt16(int16(n)) + } + if n >= math.MinInt32 { + return e.EncodeInt32(int32(n)) + } + return e.EncodeInt64(int64(n)) +} + +func (e *Encoder) EncodeFloat32(n float32) error { + return e.write4(codes.Float, math.Float32bits(n)) +} + +func (e *Encoder) EncodeFloat64(n float64) error { + return e.write8(codes.Double, math.Float64bits(n)) +} + +func (e *Encoder) write1(code codes.Code, n uint8) error { + e.buf = e.buf[:2] + e.buf[0] = byte(code) + e.buf[1] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write2(code codes.Code, n uint16) error { + e.buf = e.buf[:3] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 8) + e.buf[2] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write4(code codes.Code, n uint32) error { + e.buf = e.buf[:5] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 24) + e.buf[2] = byte(n >> 16) + e.buf[3] = byte(n >> 8) + e.buf[4] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write8(code codes.Code, n uint64) error { + e.buf = e.buf[:9] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 56) + e.buf[2] = byte(n >> 48) + e.buf[3] = byte(n >> 40) + e.buf[4] = byte(n >> 32) + e.buf[5] = byte(n >> 24) + e.buf[6] = byte(n >> 16) + e.buf[7] = byte(n >> 8) + e.buf[8] = byte(n) + return e.write(e.buf) +} + +func encodeUint8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint8Cond(uint8(v.Uint())) +} + +func encodeUint16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint16Cond(uint16(v.Uint())) +} + +func encodeUint32CondValue(e *Encoder, v reflect.Value) error { + return 
e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/encode_slice.go new file mode 100644 index 0000000..5ddbd63 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_slice.go @@ -0,0 +1,124 @@ +package msgpack + +import ( + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) 
+ return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(codes.Bin8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Bin16, uint16(l)) + } + return e.write4(codes.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStrLen(l int) error { + if l < 32 { + return e.writeCode(codes.FixedStrLow | codes.Code(l)) + } + if l < 256 { + return e.write1(codes.Str8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Str16, uint16(l)) + } + return e.write4(codes.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if err := e.encodeStrLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedArrayLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Array16, uint16(l)) + } + return e.write4(codes.Array32, uint32(l)) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_value.go b/vendor/github.com/vmihailenco/msgpack/encode_value.go new file mode 100644 index 0000000..b46ab02 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_value.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeInt64CondValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUint64CondValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if encoder, ok := typEncMap[typ]; ok { + return encoder + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + + kind := typ.Kind() + + // Addressable struct field value. 
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/ext.go b/vendor/github.com/vmihailenco/msgpack/ext.go new file mode 100644 index 0000000..7970be8 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/ext.go @@ -0,0 +1,238 @@ +package msgpack + +import ( + "bytes" + "fmt" + "reflect" + "sync" + + "github.com/vmihailenco/msgpack/codes" +) + +var extTypes = make(map[int8]reflect.Type) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// RegisterExt records a type, identified by a value for that type, +// under the provided id. That id will identify the concrete type of a value +// sent or received as an interface variable. Only types that will be +// transferred as implementations of interface values need to be registered. 
+// Expecting to be used only during initialization, it panics if the mapping +// between types and ids is not a bijection. +func RegisterExt(id int8, value interface{}) { + typ := reflect.TypeOf(value) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + ptr := reflect.PtrTo(typ) + + if _, ok := extTypes[id]; ok { + panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id)) + } + + registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr)) + registerExt(id, typ, getEncoder(typ), getDecoder(typ)) +} + +func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { + if dec != nil { + extTypes[id] = typ + } + if enc != nil { + typEncMap[typ] = makeExtEncoder(id, enc) + } + if dec != nil { + typDecMap[typ] = makeExtDecoder(id, dec) + } +} + +func (e *Encoder) EncodeExtHeader(typeId int8, length int) error { + if err := e.encodeExtLen(length); err != nil { + return err + } + if err := e.w.WriteByte(byte(typeId)); err != nil { + return err + } + return nil +} + +func makeExtEncoder(typeId int8, enc encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + buf := bufferPool.Get().(*bytes.Buffer) + defer bufferPool.Put(buf) + buf.Reset() + + oldw := e.w + e.w = buf + err := enc(e, v) + e.w = oldw + + if err != nil { + return err + } + + err = e.EncodeExtHeader(typeId, buf.Len()) + if err != nil { + return err + } + return e.write(buf.Bytes()) + } +} + +func makeExtDecoder(typeId int8, dec decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + c, err := d.PeekCode() + if err != nil { + return err + } + + if !codes.IsExt(c) { + return dec(d, v) + } + + id, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + + if id != typeId { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", int8(c), typeId) + } + + d.extLen = extLen + return dec(d, v) + } +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(codes.FixExt1) + case 2: + return e.writeCode(codes.FixExt2) + case 4: + return e.writeCode(codes.FixExt4) + case 8: + return e.writeCode(codes.FixExt8) + case 16: + return e.writeCode(codes.FixExt16) + } + if l < 256 { + return e.write1(codes.Ext8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Ext16, uint16(l)) + } + return e.write4(codes.Ext32, uint32(l)) +} + +func (d *Decoder) parseExtLen(c codes.Code) (int, error) { + switch c { + case codes.FixExt1: + return 1, nil + case codes.FixExt2: + return 2, nil + case codes.FixExt4: + return 4, nil + case codes.FixExt8: + return 8, nil + case codes.FixExt16: + return 16, nil + case codes.Ext8: + n, err := d.uint8() + return int(n), err + case codes.Ext16: + n, err := d.uint16() + return int(n), err + case codes.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c) + } +} + +func (d *Decoder) decodeExtHeader(c codes.Code) (int8, int, error) { + length, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + typeId, err := d.readCode() + if err != nil { + return 0, 0, err + } + + return int8(typeId), length, nil +} + +func (d *Decoder) DecodeExtHeader() (typeId int8, length int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.decodeExtHeader(c) +} + +func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { + extId, extLen, err := d.decodeExtHeader(c) + if err != nil { + return nil, err + } + + typ, ok := extTypes[extId] + if !ok { + return nil, fmt.Errorf("msgpack: 
unregistered ext id=%d", extId) + } + + v := reflect.New(typ) + + d.extLen = extLen + err = d.DecodeValue(v.Elem()) + d.extLen = 0 + if err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c codes.Code) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c codes.Code) error { + // Read ext type. + _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c codes.Code) int { + switch c { + case codes.Ext8: + return 1 + case codes.Ext16: + return 2 + case codes.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/msgpack.go b/vendor/github.com/vmihailenco/msgpack/msgpack.go new file mode 100644 index 0000000..220b43c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/msgpack.go @@ -0,0 +1,17 @@ +package msgpack + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} diff --git a/vendor/github.com/vmihailenco/msgpack/tag.go b/vendor/github.com/vmihailenco/msgpack/tag.go new file mode 100644 index 0000000..48e6f94 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/tag.go @@ -0,0 +1,42 @@ +package msgpack + +import ( + "strings" +) + +type tagOptions string + +func (o tagOptions) Get(name string) (string, bool) { + s := string(o) + for len(s) > 0 { + var next string + idx := strings.IndexByte(s, ',') + if idx >= 0 { + s, next = s[:idx], s[idx+1:] + } + if strings.HasPrefix(s, name) { + return s[len(name):], true + } + s = next + } + return "", false +} + +func (o tagOptions) Contains(name string) bool { + _, ok := o.Get(name) + return ok +} + +func parseTag(tag string) (string, tagOptions) { + if idx := strings.IndexByte(tag, ','); idx != -1 { + name := tag[:idx] + if strings.IndexByte(name, ':') == -1 { + return name, tagOptions(tag[idx+1:]) + } + } + + if strings.IndexByte(tag, ':') == -1 { + return tag, "" + } + return "", tagOptions(tag) +} diff --git a/vendor/github.com/vmihailenco/msgpack/time.go b/vendor/github.com/vmihailenco/msgpack/time.go new file mode 100644 index 0000000..f740969 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/time.go @@ -0,0 +1,145 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +var timeExtId int8 = -1 + +func init() { + timeType := reflect.TypeOf((*time.Time)(nil)).Elem() + registerExt(timeExtId, timeType, encodeTimeValue, decodeTimeValue) +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtId)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + if data&0xffffffff00000000 == 0 { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } else { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, data) + return b + } + } + + b := make([]byte, 12) + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + 
binary.BigEndian.PutUint64(b[4:], uint64(secs)) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + tm, err := d.decodeTime() + if err != nil { + return tm, err + } + + if tm.IsZero() { + // Assume that zero time does not have timezone information. + return tm.UTC(), nil + } + return tm, nil +} + +func (d *Decoder) decodeTime() (time.Time, error) { + extLen := d.extLen + d.extLen = 0 + if extLen == 0 { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == codes.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if codes.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extLen, err = d.parseExtLen(c) + if err != nil { + return time.Time{}, err + } + + // Skip ext id. + _, err = d.s.ReadByte() + if err != nil { + return time.Time{}, nil + } + } + + b, err := d.readN(extLen) + if err != nil { + return time.Time{}, err + } + + switch len(b) { + case 4: + sec := binary.BigEndian.Uint32(b) + return time.Unix(int64(sec), 0), nil + case 8: + sec := binary.BigEndian.Uint64(b) + nsec := int64(sec >> 34) + sec &= 0x00000003ffffffff + return time.Unix(int64(sec), nsec), nil + case 12: + nsec := binary.BigEndian.Uint32(b) + sec := binary.BigEndian.Uint64(b[4:]) + return time.Unix(int64(sec), int64(nsec)), nil + default: + err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen) + return time.Time{}, err + } +} + +func encodeTimeValue(e *Encoder, v reflect.Value) error { + tm := v.Interface().(time.Time) + b := e.encodeTime(tm) + return e.write(b) +} + +func decodeTimeValue(d *Decoder, v reflect.Value) error { + tm, err := d.DecodeTime() + if err != nil { + return err + } + v.Set(reflect.ValueOf(tm)) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/types.go b/vendor/github.com/vmihailenco/msgpack/types.go new file mode 100644 index 0000000..6a1bf7f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/types.go @@ -0,0 +1,310 @@ +package msgpack + +import ( + "reflect" + "sync" +) + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem() +var customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem() + +var marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + +type encoderFunc func(*Encoder, reflect.Value) error +type decoderFunc func(*Decoder, reflect.Value) error + +var typEncMap = make(map[reflect.Type]encoderFunc) +var typDecMap = make(map[reflect.Type]decoderFunc) + +// Register registers encoder and decoder functions for a value. +// This is low level API and in most cases you should prefer implementing +// Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces. 
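
As the doc comment above notes, most callers implement the CustomEncoder/CustomDecoder interfaces rather than calling Register directly. A minimal sketch using only primitives that appear in this diff (EncodeInt, DecodeInt64); the Point type is hypothetical and not part of the vendored code:

    import "github.com/vmihailenco/msgpack"

    // Point is an illustrative application type.
    type Point struct{ X, Y int64 }

    // EncodeMsgpack implements CustomEncoder using the integer helpers.
    func (p *Point) EncodeMsgpack(e *msgpack.Encoder) error {
        if err := e.EncodeInt(p.X); err != nil {
            return err
        }
        return e.EncodeInt(p.Y)
    }

    // DecodeMsgpack implements CustomDecoder.
    func (p *Point) DecodeMsgpack(d *msgpack.Decoder) error {
        var err error
        if p.X, err = d.DecodeInt64(); err != nil {
            return err
        }
        p.Y, err = d.DecodeInt64()
        return err
    }
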
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typEncMap[typ] = enc + } + if dec != nil { + typDecMap[typ] = dec + } +} + +//------------------------------------------------------------------------------ + +var structs = newStructCache(false) +var jsonStructs = newStructCache(true) + +type structCache struct { + mu sync.RWMutex + m map[reflect.Type]*fields + + useJSONTag bool +} + +func newStructCache(useJSONTag bool) *structCache { + return &structCache{ + m: make(map[reflect.Type]*fields), + + useJSONTag: useJSONTag, + } +} + +func (m *structCache) Fields(typ reflect.Type) *fields { + m.mu.RLock() + fs, ok := m.m[typ] + m.mu.RUnlock() + if ok { + return fs + } + + m.mu.Lock() + fs, ok = m.m[typ] + if !ok { + fs = getFields(typ, m.useJSONTag) + m.m[typ] = fs + } + m.mu.Unlock() + + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + name string + index []int + omitEmpty bool + encoder encoderFunc + decoder decoderFunc +} + +func (f *field) value(v reflect.Value) reflect.Value { + return fieldByIndex(v, f.index) +} + +func (f *field) Omit(strct reflect.Value) bool { + return f.omitEmpty && isEmptyValue(f.value(strct)) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + return f.encoder(e, f.value(strct)) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + return f.decoder(d, f.value(strct)) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Table map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(numField int) *fields { + return &fields{ + Table: make(map[string]*field, numField), + List: make([]*field, 0, numField), + } +} + +func (fs *fields) Add(field *field) { + fs.Table[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) OmitEmpty(strct reflect.Value) []*field { + if !fs.hasOmitEmpty { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + for _, f := range fs.List { + if !f.Omit(strct) { + fields = append(fields, f) + } + } + return fields +} + +func getFields(typ reflect.Type, useJSONTag bool) *fields { + numField := typ.NumField() + fs := newFields(numField) + + var omitEmpty bool + for i := 0; i < numField; i++ { + f := typ.Field(i) + + tag := f.Tag.Get("msgpack") + if useJSONTag && tag == "" { + tag = f.Tag.Get("json") + } + + name, opt := parseTag(tag) + if name == "-" { + continue + } + + if f.Name == "_msgpack" { + if opt.Contains("asArray") { + fs.AsArray = true + } + if opt.Contains("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: name, + index: f.Index, + omitEmpty: omitEmpty || opt.Contains("omitempty"), + encoder: getEncoder(f.Type), + decoder: getDecoder(f.Type), + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !opt.Contains("noinline") { + inline := opt.Contains("inline") + if inline { + inlineFields(fs, f.Type, field, useJSONTag) + } else { + inline = autoinlineFields(fs, f.Type, field, useJSONTag) + } + if inline { + fs.Table[field.name] = field + continue + } + } + + fs.Add(field) + } + return fs +} + +var encodeStructValuePtr uintptr +var decodeStructValuePtr uintptr + +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = 
reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) { + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func autoinlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) + fs.Add(field) + } + return true +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + for i, x := range index { + if i > 0 { + var ok bool + v, ok = indirectNew(v) + if !ok { + return v + } + } + v = v.Field(x) + } + return v +} + +func indirectNew(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(reflect.New(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/zclconf/go-cty/LICENSE b/vendor/github.com/zclconf/go-cty/LICENSE new file mode 100644 index 0000000..d6503b5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2018 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty/cty/capsule.go b/vendor/github.com/zclconf/go-cty/cty/capsule.go new file mode 100644 index 0000000..d273d14 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/capsule.go @@ -0,0 +1,89 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { + return t.Name +} + +func (t *capsuleType) GoString() string { + // To get a useful representation of our native type requires some + // shenanigans. + victimVal := reflect.Zero(t.GoType) + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) +} + +// Capsule creates a new Capsule type. +// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. A capsule-typed value supports no operations except equality, and +// equality is implemented by pointer identity of the encapsulated pointer. +// +// The given name is used as the new type's "friendly name". This can be any +// string in principle, but will usually be a short, all-lowercase name aimed +// at users of the embedding language (i.e. not mention Go-specific details) +// and will ideally not create ambiguity with any predefined cty type. +// +// Capsule types are never introduced by any standard cty operation, so a +// calling application opts in to including them within its own type system +// by creating them and introducing them via its own functions. At that point, +// the application is responsible for dealing with any capsule-typed values +// that might be returned. +func Capsule(name string, nativeType reflect.Type) Type { + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + }, + } +} + +// IsCapsuleType returns true if this type is a capsule type, as created +// by cty.Capsule . +func (t Type) IsCapsuleType() bool { + _, ok := t.typeImpl.(*capsuleType) + return ok +} + +// EncapsulatedType returns the encapsulated native type of a capsule type, +// or panics if the receiver is not a Capsule type. +// +// Is IsCapsuleType to determine if this method is safe to call. 
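
Capsule types, created by cty.Capsule above, compare by identity and exist only to smuggle a Go value through the cty type system; EncapsulatedType (just below) recovers the wrapped Go type. A brief usage sketch, not part of the vendored code; the bufCapsule name and the bytes.Buffer payload are arbitrary choices for illustration:

    import (
        "bytes"
        "reflect"

        "github.com/zclconf/go-cty/cty"
    )

    // bufCapsule is a distinct cty type whose values carry a *bytes.Buffer.
    var bufCapsule = cty.Capsule("buffer", reflect.TypeOf(bytes.Buffer{}))

    // describe reports whether a type is a capsule and what Go type it wraps.
    func describe(t cty.Type) string {
        if t.IsCapsuleType() {
            return "capsule of " + t.EncapsulatedType().String()
        }
        return t.FriendlyName()
    }
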
+func (t Type) EncapsulatedType() reflect.Type { + impl, ok := t.typeImpl.(*capsuleType) + if !ok { + panic("not a capsule type") + } + return impl.GoType +} diff --git a/vendor/github.com/zclconf/go-cty/cty/collection.go b/vendor/github.com/zclconf/go-cty/cty/collection.go new file mode 100644 index 0000000..ab3919b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/collection.go @@ -0,0 +1,34 @@ +package cty + +import ( + "errors" +) + +type collectionTypeImpl interface { + ElementType() Type +} + +// IsCollectionType returns true if the given type supports the operations +// that are defined for all collection types. +func (t Type) IsCollectionType() bool { + _, ok := t.typeImpl.(collectionTypeImpl) + return ok +} + +// ElementType returns the element type of the receiver if it is a collection +// type, or panics if it is not. Use IsCollectionType first to test whether +// this method will succeed. +func (t Type) ElementType() Type { + if ct, ok := t.typeImpl.(collectionTypeImpl); ok { + return ct.ElementType() + } + panic(errors.New("not a collection type")) +} + +// ElementCallback is a callback type used for iterating over elements of +// collections and attributes of objects. +// +// The types of key and value depend on what type is being iterated over. +// Return true to stop iterating after the current element, or false to +// continue iterating. +type ElementCallback func(key Value, val Value) (stop bool) diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go new file mode 100644 index 0000000..d84f6ac --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go @@ -0,0 +1,165 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// compareTypes implements a preference order for unification. +// +// The result of this method is not useful for anything other than unification +// preferences, since it assumes that the caller will verify that any suggested +// conversion is actually possible and it is thus able to to make certain +// optimistic assumptions. +func compareTypes(a cty.Type, b cty.Type) int { + + // DynamicPseudoType always has lowest preference, because anything can + // convert to it (it acts as a placeholder for "any type") and we want + // to optimistically assume that any dynamics will converge on matching + // their neighbors. + if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType { + if a != cty.DynamicPseudoType { + return -1 + } + if b != cty.DynamicPseudoType { + return 1 + } + return 0 + } + + if a.IsPrimitiveType() && b.IsPrimitiveType() { + // String is a supertype of all primitive types, because we can + // represent all primitive values as specially-formatted strings. + if a == cty.String || b == cty.String { + if a != cty.String { + return 1 + } + if b != cty.String { + return -1 + } + return 0 + } + } + + if a.IsListType() && b.IsListType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsSetType() && b.IsSetType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsMapType() && b.IsMapType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + + // From this point on we may have swapped the two items in order to + // simplify our cases. Therefore any non-zero return after this point + // must be multiplied by "swap" to potentially invert the return value + // if needed. 
+ swap := 1 + switch { + case a.IsTupleType() && b.IsListType(): + fallthrough + case a.IsObjectType() && b.IsMapType(): + fallthrough + case a.IsSetType() && b.IsTupleType(): + fallthrough + case a.IsSetType() && b.IsListType(): + a, b = b, a + swap = -1 + } + + if b.IsSetType() && (a.IsTupleType() || a.IsListType()) { + // We'll just optimistically assume that the element types are + // unifyable/convertible, and let a second recursive pass + // figure out how to make that so. + return -1 * swap + } + + if a.IsListType() && b.IsTupleType() { + // We'll just optimistically assume that the tuple's element types + // can be unified into something compatible with the list's element + // type. + return -1 * swap + } + + if a.IsMapType() && b.IsObjectType() { + // We'll just optimistically assume that the object's attribute types + // can be unified into something compatible with the map's element + // type. + return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. + if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go new file mode 100644 index 0000000..c773b52 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -0,0 +1,136 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. 
+ return func(in cty.Value, path cty.Path) (cty.Value, error) { + if !in.IsKnown() { + return cty.UnknownVal(out), nil + } + if in.IsNull() { + // We'll pass through nulls, albeit type converted, and let + // the caller deal with whatever handling they want to do in + // case null values are considered valid in some applications. + return cty.NullVal(out), nil + } + + return conv(in, path) + } +} + +func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { + switch { + + case out == cty.DynamicPseudoType: + // Conversion *to* DynamicPseudoType means that the caller wishes + // to allow any type in this position, so we'll produce a do-nothing + // conversion that just passes through the value as-is. + return dynamicPassthrough + + case unsafe && in == cty.DynamicPseudoType: + // Conversion *from* DynamicPseudoType means that we have a value + // whose type isn't yet known during type checking. For these we will + // assume that conversion will succeed and deal with any errors that + // result (which is why we can only do this when "unsafe" is set). + return dynamicFixup(out) + + case in.IsPrimitiveType() && out.IsPrimitiveType(): + conv := primitiveConversionsSafe[in][out] + if conv != nil { + return conv + } + if unsafe { + return primitiveConversionsUnsafe[in][out] + } + return nil + + case out.IsObjectType() && in.IsObjectType(): + return conversionObjectToObject(in, out, unsafe) + + case out.IsListType() && (in.IsListType() || in.IsSetType()): + inEty := in.ElementType() + outEty := out.ElementType() + if inEty.Equals(outEty) { + // This indicates that we're converting from list to set with + // the same element type, so we don't need an element converter. + return conversionCollectionToList(outEty, nil) + } + + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToList(outEty, convEty) + + case out.IsSetType() && (in.IsListType() || in.IsSetType()): + if in.IsListType() && !unsafe { + // Conversion from list to map is unsafe because it will lose + // information: the ordering will not be preserved, and any + // duplicate elements will be conflated. + return nil + } + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if inEty.Equals(outEty) { + // This indicates that we're converting from set to list with + // the same element type, so we don't need an element converter. + return conversionCollectionToSet(outEty, nil) + } + + if convEty == nil { + return nil + } + return conversionCollectionToSet(outEty, convEty) + + case out.IsMapType() && in.IsMapType(): + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToMap(outEty, convEty) + + case out.IsListType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToList(in, outEty, unsafe) + + case out.IsSetType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToSet(in, outEty, unsafe) + + case out.IsMapType() && in.IsObjectType(): + outEty := out.ElementType() + return conversionObjectToMap(in, outEty, unsafe) + + default: + return nil + + } +} + +// retConversion wraps a conversion (internal type) so it can be returned +// as a Conversion (public type). 
+func retConversion(conv conversion) Conversion { + if conv == nil { + return nil + } + + return func(in cty.Value) (cty.Value, error) { + return conv(in, cty.Path(nil)) + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go new file mode 100644 index 0000000..c2ac14e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go @@ -0,0 +1,340 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversionCollectionToList returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a list. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a list. (For example, +// if we're converting from a set into a list of the same element type.) +func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionCollectionToSet returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a set. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a set. (For example, +// if we're converting from a list into a set of the same element type.) +func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(elems), nil + } +} + +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. 
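
At the public level, conversionCollectionToMap (defined just below) is what lets one map type convert to another whose element type differs. A small sketch through the package's public entry point; the convertExample helper and its literal values are illustrative only, and the usual cty and cty/convert imports are assumed:

    // convertExample converts a map of numbers into a map of strings.
    func convertExample() (cty.Value, error) {
        nums := cty.MapVal(map[string]cty.Value{"a": cty.NumberIntVal(1)})
        // Number -> String is a safe primitive conversion, so this succeeds
        // and yields a cty.Map(cty.String) value with "a" mapped to "1".
        return convert.Convert(nums, cty.Map(cty.String))
    }
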
+func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, path.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.ListValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + path = append(path, nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.SetVal(elems), nil + } +} + +// conversionTupleToList returns a conversion that will take a value of the +// given tuple type and return a list of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.ListValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. 
+ listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + path = append(path, nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.ListVal(elems), nil + } +} + +// conversionObjectToMap returns a conversion that will take a value of the +// given object type and return a map of the given element type. +// +// Will panic if the given objectType isn't actually an object type. +func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion { + objectAtys := objectType.AttributeTypes() + + if len(objectAtys) == 0 { + // Empty object short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.MapValEmpty(mapEty), nil + } + } + + if mapEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + objectAtysList := make([]cty.Type, 0, len(objectAtys)) + for _, aty := range objectAtys { + objectAtysList = append(objectAtysList, aty) + } + mapEty, _ = unify(objectAtysList, unsafe) + if mapEty == cty.NilType { + return nil + } + } + + elemConvs := make(map[string]conversion, len(objectAtys)) + for name, objectAty := range objectAtys { + if objectAty.Equals(mapEty) { + // no conversion required + continue + } + + elemConvs[name] = getConversion(objectAty, mapEty, unsafe) + if elemConvs[name] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. 
+ return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, len(elemConvs)) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + name, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: name, + } + + conv := elemConvs[name.AsString()] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems[name.AsString()] = val + } + + return cty.MapVal(elems), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go new file mode 100644 index 0000000..4d19cf6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go @@ -0,0 +1,33 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// dynamicFixup deals with just-in-time conversions of values that were +// input-typed as cty.DynamicPseudoType during analysis, ensuring that +// we end up with the desired output type once the value is known, or +// failing with an error if that is not possible. +// +// This is in the spirit of the cty philosophy of optimistically assuming that +// DynamicPseudoType values will become the intended value eventually, and +// dealing with any inconsistencies during final evaluation. +func dynamicFixup(wantType cty.Type) conversion { + return func(in cty.Value, path cty.Path) (cty.Value, error) { + ret, err := Convert(in, wantType) + if err != nil { + // Re-wrap this error so that the returned path is relative + // to the caller's original value, rather than relative to our + // conversion value here. + return cty.NilVal, path.NewError(err) + } + return ret, nil + } +} + +// dynamicPassthrough is an identity conversion that is used when the +// target type is DynamicPseudoType, indicating that the caller doesn't care +// which type is returned. +func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go new file mode 100644 index 0000000..62dabb8 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go @@ -0,0 +1,76 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversionObjectToObject returns a conversion that will make the input +// object type conform to the output object type, if possible. +// +// Conversion is possible only if the output type is a subset of the input +// type, meaning that each attribute of the output type has a corresponding +// attribute in the input type where a recursive conversion is available. +// +// Shallow object conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit the above definition of "subset". +func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { + inAtys := in.AttributeTypes() + outAtys := out.AttributeTypes() + attrConvs := make(map[string]conversion) + + for name, outAty := range outAtys { + inAty, exists := inAtys[name] + if !exists { + // No conversion is available, then. + return nil + } + + if inAty.Equals(outAty) { + // No conversion needed, but we'll still record the attribute + // in our map for later reference. 
+ attrConvs[name] = nil + continue + } + + attrConvs[name] = getConversion(inAty, outAty, unsafe) + if attrConvs[name] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the attribute + // conversions given in attrConvs. + return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 0000000..e563ee3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,50 @@ +package convert + +import ( + "math/big" + + "github.com/zclconf/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + f, _, err := big.ParseFloat(val.AsString(), 10, 512, big.ToNearestEven) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return cty.NumberVal(f), nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + }, + }, +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/doc.go b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go new file mode 100644 index 0000000..2037299 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go @@ -0,0 +1,15 @@ +// Package convert contains some routines for converting between cty types. +// The intent of providing this package is to encourage applications using +// cty to have consistent type conversion behavior for maximal interoperability +// when Values pass from one application to another. +// +// The conversions are categorized into two categories. "Safe" conversions are +// ones that are guaranteed to succeed if given a non-null value of the +// appropriate source type. "Unsafe" conversions, on the other hand, are valid +// for only a subset of input values, and thus may fail with an error when +// called for values outside of that valid subset. 
+// +// The functions whose names end in Unsafe support all of the conversions that +// are supported by the corresponding functions whose names do not have that +// suffix, and then additional unsafe conversions as well. +package convert diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go new file mode 100644 index 0000000..88a4a25 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go @@ -0,0 +1,135 @@ +package convert + +import ( + "bytes" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" +) + +// MismatchMessage is a helper to return an English-language description of +// the differences between got and want, phrased as a reason why got does +// not conform to want. +// +// This function does not itself attempt conversion, and so it should generally +// be used only after a conversion has failed, to report the conversion failure +// to an English-speaking user. The result will be confusing got is actually +// conforming to or convertable to want. +// +// The shorthand helper function Convert uses this function internally to +// produce its error messages, so callers of that function do not need to +// also use MismatchMessage. +// +// This function is similar to Type.TestConformance, but it is tailored to +// describing conversion failures and so the messages it generates relate +// specifically to the conversion rules implemented in this package. +func MismatchMessage(got, want cty.Type) string { + switch { + + case got.IsObjectType() && want.IsObjectType(): + // If both types are object types then we may be able to say something + // about their respective attributes. + return mismatchMessageObjects(got, want) + + case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from tuple to list failed then it's because we couldn't + // find a common type to convert all of the tuple elements to. + return "all list elements must have the same type" + + case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from tuple to set failed then it's because we couldn't + // find a common type to convert all of the tuple elements to. + return "all set elements must have the same type" + + case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from object to map failed then it's because we couldn't + // find a common type to convert all of the object attributes to. + return "all map elements must have the same type" + + default: + // If we have nothing better to say, we'll just state what was required. + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageObjects(got, want cty.Type) string { + // Per our conversion rules, "got" is allowed to be a superset of "want", + // and so we'll produce error messages here under that assumption. + gotAtys := got.AttributeTypes() + wantAtys := want.AttributeTypes() + + // If we find missing attributes then we'll report those in preference, + // but if not then we will report a maximum of one non-conforming + // attribute, just to keep our messages relatively terse. + // We'll also prefer to report a recursive type error from an _unsafe_ + // conversion over a safe one, because these are subjectively more + // "serious". 
+ var missingAttrs []string + var unsafeMismatchAttr string + var safeMismatchAttr string + + for name, wantAty := range wantAtys { + gotAty, exists := gotAtys[name] + if !exists { + missingAttrs = append(missingAttrs, name) + continue + } + + // We'll now try to convert these attributes in isolation and + // see if we have a nested conversion error to report. + // We'll try an unsafe conversion first, and then fall back on + // safe if unsafe is possible. + + // If we already have an unsafe mismatch attr error then we won't bother + // hunting for another one. + if unsafeMismatchAttr != "" { + continue + } + if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil { + unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + + // If we already have a safe mismatch attr error then we won't bother + // hunting for another one. + if safeMismatchAttr != "" { + continue + } + if conv := GetConversion(gotAty, wantAty); conv == nil { + safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + } + + // We should now have collected at least one problem. If we have more than + // one then we'll use our preference order to decide what is most important + // to report. + switch { + + case len(missingAttrs) != 0: + switch len(missingAttrs) { + case 1: + return fmt.Sprintf("attribute %q is required", missingAttrs[0]) + case 2: + return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1]) + default: + sort.Strings(missingAttrs) + var buf bytes.Buffer + for _, name := range missingAttrs[:len(missingAttrs)-1] { + fmt.Fprintf(&buf, "%q, ", name) + } + fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1]) + return fmt.Sprintf("attributes %s are required", buf.Bytes()) + } + + case unsafeMismatchAttr != "": + return unsafeMismatchAttr + + case safeMismatchAttr != "": + return safeMismatchAttr + + default: + // We should never get here, but if we do then we'll return + // just a generic message. + return "incorrect object attributes" + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go new file mode 100644 index 0000000..af19bdc --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go @@ -0,0 +1,83 @@ +package convert + +import ( + "errors" + + "github.com/zclconf/go-cty/cty" +) + +// This file contains the public interface of this package, which is intended +// to be a small, convenient interface designed for easy integration into +// a hypothetical language type checker and interpreter. + +// Conversion is a named function type representing a conversion from a +// value of one type to a value of another type. +// +// The source type for a conversion is always the source type given to +// the function that returned the Conversion, but there is no way to recover +// that from a Conversion value itself. If a Conversion is given a value +// that is not of its expected type (with the exception of DynamicPseudoType, +// which is always supported) then the function may panic or produce undefined +// results. +type Conversion func(in cty.Value) (out cty.Value, err error) + +// GetConversion returns a Conversion between the given in and out Types if +// a safe one is available, or returns nil otherwise. 
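
// A minimal sketch of the safe/unsafe split implemented above, driven through
// this package's public helpers. Illustrative standalone program, not part of
// the vendored sources:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Number-to-string always succeeds, so a safe conversion is available.
	if conv := convert.GetConversion(cty.Number, cty.String); conv != nil {
		s, _ := conv(cty.NumberIntVal(5))
		fmt.Println(s.AsString()) // "5"
	}

	// String-to-number can fail (e.g. for "hello"), so it is offered only as
	// an unsafe conversion.
	fmt.Println(convert.GetConversion(cty.String, cty.Number) == nil)       // true
	fmt.Println(convert.GetConversionUnsafe(cty.String, cty.Number) == nil) // false
}
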
+func GetConversion(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, false)) +} + +// GetConversionUnsafe returns a Conversion between the given in and out Types +// if either a safe or unsafe one is available, or returns nil otherwise. +func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, true)) +} + +// Convert returns the result of converting the given value to the given type +// if an safe or unsafe conversion is available, or returns an error if such a +// conversion is impossible. +// +// This is a convenience wrapper around calling GetConversionUnsafe and then +// immediately passing the given value to the resulting function. +func Convert(in cty.Value, want cty.Type) (cty.Value, error) { + if in.Type().Equals(want) { + return in, nil + } + + conv := GetConversionUnsafe(in.Type(), want) + if conv == nil { + return cty.NilVal, errors.New(MismatchMessage(in.Type(), want)) + } + return conv(in) +} + +// Unify attempts to find the most general type that can be converted from +// all of the given types. If this is possible, that type is returned along +// with a slice of necessary conversions for some of the given types. +// +// If no common supertype can be found, this function returns cty.NilType and +// a nil slice. +// +// If a common supertype *can* be found, the returned slice will always be +// non-nil and will contain a non-nil conversion for each given type that +// needs to be converted, with indices corresponding to the input slice. +// Any given type that does *not* need conversion (because it is already of +// the appropriate type) will have a nil Conversion. +// +// cty.DynamicPseudoType is, as usual, a special case. If the given type list +// contains a mixture of dynamic and non-dynamic types, the dynamic types are +// disregarded for type selection and a conversion is returned for them that +// will attempt a late conversion of the given value to the target type, +// failing with a conversion error if the eventual concrete type is not +// compatible. If *all* given types are DynamicPseudoType, or in the +// degenerate case of an empty slice of types, the returned type is itself +// cty.DynamicPseudoType and no conversions are attempted. +func Unify(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, false) +} + +// UnifyUnsafe is the same as Unify except that it may return unsafe +// conversions in situations where a safe conversion isn't also available. +func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, true) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go new file mode 100644 index 0000000..b776910 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go @@ -0,0 +1,69 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// sortTypes produces an ordering of the given types that serves as a +// preference order for the result of unification of the given types. +// The return value is a slice of indices into the given slice, and will +// thus always be the same length as the given slice. +// +// The goal is that the most general of the given types will appear first +// in the ordering. If there are uncomparable pairs of types in the list +// then they will appear in an undefined order, and the unification pass +// will presumably then fail. 
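
// A short sketch of Convert and Unify in use. Illustrative standalone program;
// the value and type constructors (cty.TupleVal, cty.List, and so on) come
// from the main cty package rather than from this file:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Convert falls back on unsafe conversions: a tuple of number and string
	// can become a list of string by converting the number element.
	tup := cty.TupleVal([]cty.Value{cty.NumberIntVal(1), cty.StringVal("b")})
	lst, err := convert.Convert(tup, cty.List(cty.String))
	fmt.Println(err, lst.Index(cty.NumberIntVal(0)).AsString()) // <nil> 1

	// Unify chooses the most general of the given types and returns the
	// conversions needed to reach it; here number converts to string.
	ty, convs := convert.Unify([]cty.Type{cty.Number, cty.String})
	fmt.Println(ty.FriendlyName(), convs[0] != nil, convs[1] == nil) // string true true
}
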
+func sortTypes(tys []cty.Type) []int { + l := len(tys) + + // First we build a graph whose edges represent "more general than", + // which we will then do a topological sort of. + edges := make([][]int, l) + for i := 0; i < (l - 1); i++ { + for j := i + 1; j < l; j++ { + cmp := compareTypes(tys[i], tys[j]) + switch { + case cmp < 0: + edges[i] = append(edges[i], j) + case cmp > 0: + edges[j] = append(edges[j], i) + } + } + } + + // Compute the in-degree of each node + inDegree := make([]int, l) + for _, outs := range edges { + for _, j := range outs { + inDegree[j]++ + } + } + + // The array backing our result will double as our queue for visiting + // the nodes, with the queue slice moving along this array until it + // is empty and positioned at the end of the array. Thus our visiting + // order is also our result order. + result := make([]int, l) + queue := result[0:0] + + // Initialize the queue with any item of in-degree 0, preserving + // their relative order. + for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go new file mode 100644 index 0000000..bd6736b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go @@ -0,0 +1,66 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. + conversions := make([]Conversion, len(types)) +Preferences: + for _, wantTypeIdx := range prefOrder { + wantType := types[wantTypeIdx] + for i, tryType := range types { + if i == wantTypeIdx { + // Don't need to convert our wanted type to itself + conversions[i] = nil + continue + } + + if tryType.Equals(wantType) { + conversions[i] = nil + continue + } + + if unsafe { + conversions[i] = GetConversionUnsafe(tryType, wantType) + } else { + conversions[i] = GetConversion(tryType, wantType) + } + + if conversions[i] == nil { + // wantType is not a suitable unification type, so we'll + // try the next one in our preference order. + continue Preferences + } + } + + return wantType, conversions + } + + // TODO: For structural types, try to invent a new type that they + // can all be unified to, by unifying their respective attributes. 
+ + // If we fall out here, no unification is possible + return cty.NilType, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/doc.go b/vendor/github.com/zclconf/go-cty/cty/doc.go new file mode 100644 index 0000000..d31f054 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/doc.go @@ -0,0 +1,18 @@ +// Package cty (pronounced see-tie) provides some infrastructure for a type +// system that might be useful for applications that need to represent +// configuration values provided by the user whose types are not known +// at compile time, particularly if the calling application also allows +// such values to be used in expressions. +// +// The type system consists of primitive types Number, String and Bool, as +// well as List and Map collection types and Object types that can have +// arbitrarily-typed sets of attributes. +// +// A set of operations is defined on these types, which is accessible via +// the wrapper struct Value, which annotates the raw, internal representation +// of a value with its corresponding type. +// +// This package is oriented towards being a building block for configuration +// languages used to bootstrap an application. It is not optimized for use +// in tight loops where CPU time or memory pressure are a concern. +package cty diff --git a/vendor/github.com/zclconf/go-cty/cty/element_iterator.go b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go new file mode 100644 index 0000000..0bf84c7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go @@ -0,0 +1,191 @@ +package cty + +import ( + "sort" + + "github.com/zclconf/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... +// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. 
+ atys := val.ty.AttributeTypes() + keys := make([]string, 0, len(atys)) + for key := range atys { + keys = append(keys, key) + } + sort.Strings(keys) + + return &objectElementIterator{ + atys: atys, + vals: val.v.(map[string]interface{}), + attrNames: keys, + idx: -1, + } + default: + panic("attempt to iterate on non-collection, non-tuple type") + } +} + +type listElementIterator struct { + ety Type + vals []interface{} + idx int +} + +func (it *listElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.ety, + v: it.vals[i], + } +} + +func (it *listElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type mapElementIterator struct { + ety Type + vals map[string]interface{} + keys []string + idx int +} + +func (it *mapElementIterator) Element() (Value, Value) { + key := it.keys[it.idx] + return StringVal(key), Value{ + ty: it.ety, + v: it.vals[key], + } +} + +func (it *mapElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.keys) +} + +type setElementIterator struct { + ety Type + setIt *set.Iterator +} + +func (it *setElementIterator) Element() (Value, Value) { + val := Value{ + ty: it.ety, + v: it.setIt.Value(), + } + return val, val +} + +func (it *setElementIterator) Next() bool { + return it.setIt.Next() +} + +type tupleElementIterator struct { + etys []Type + vals []interface{} + idx int +} + +func (it *tupleElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.etys[i], + v: it.vals[i], + } +} + +func (it *tupleElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type objectElementIterator struct { + atys map[string]Type + vals map[string]interface{} + attrNames []string + idx int +} + +func (it *objectElementIterator) Element() (Value, Value) { + key := it.attrNames[it.idx] + return StringVal(key), Value{ + ty: it.atys[key], + v: it.vals[key], + } +} + +func (it *objectElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.attrNames) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/error.go b/vendor/github.com/zclconf/go-cty/cty/error.go new file mode 100644 index 0000000..dd139f7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/error.go @@ -0,0 +1,55 @@ +package cty + +import ( + "fmt" +) + +// PathError is a specialization of error that represents where in a +// potentially-deep data structure an error occured, using a Path. +type PathError struct { + error + Path Path +} + +func errorf(path Path, f string, args ...interface{}) error { + // We need to copy the Path because often our caller builds it by + // continually mutating the same underlying buffer. + sPath := make(Path, len(path)) + copy(sPath, path) + return PathError{ + error: fmt.Errorf(f, args...), + Path: sPath, + } +} + +// NewErrorf creates a new PathError for the current path by passing the +// given format and arguments to fmt.Errorf and then wrapping the result +// similarly to NewError. +func (p Path) NewErrorf(f string, args ...interface{}) error { + return errorf(p, f, args...) +} + +// NewError creates a new PathError for the current path, wrapping the given +// error. +func (p Path) NewError(err error) error { + // if we're being asked to wrap an existing PathError then our new + // PathError will be the concatenation of the two paths, ensuring + // that we still get a single flat PathError that's thus easier for + // callers to deal with. 
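	// (Despite its name, wrappingPath below is the boolean "ok" result of the
	// type assertion: it is true when err is itself a PathError whose path
	// should be appended after ours.)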
+ perr, wrappingPath := err.(PathError) + pathLen := len(p) + if wrappingPath { + pathLen = pathLen + len(perr.Path) + } + + sPath := make(Path, pathLen) + copy(sPath, p) + if wrappingPath { + copy(sPath[len(p):], perr.Path) + } + + return PathError{ + error: err, + Path: sPath, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gob.go b/vendor/github.com/zclconf/go-cty/cty/gob.go new file mode 100644 index 0000000..a77dace --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gob.go @@ -0,0 +1,125 @@ +package cty + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" +) + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Values to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent values of capsule types in gob, +// because the types themselves cannot be represented. +func (val Value) GobEncode() ([]byte, error) { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gv := gobValue{ + Version: 0, + Ty: val.ty, + V: val.v, + } + + err := enc.Encode(gv) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Value: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementation of the gob.GobDecoder interface, which +// inverts the operation performed by GobEncode. See the documentation of +// GobEncode for considerations when using cty.Value instances with gob. +func (val *Value) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gv gobValue + err := dec.Decode(&gv) + if err != nil { + return fmt.Errorf("error decoding cty.Value: %s", err) + } + if gv.Version != 0 { + return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version) + } + + // big.Float seems to, for some reason, lose its "pointerness" when we + // round-trip it, so we'll fix that here. + if bf, ok := gv.V.(big.Float); ok { + gv.V = &bf + } + + val.ty = gv.Ty + val.v = gv.V + + return nil +} + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Types to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent capsule types in gob. +func (t Type) GobEncode() ([]byte, error) { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gt := gobType{ + Version: 0, + Impl: t.typeImpl, + } + + err := enc.Encode(gt) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Type: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementatino of the gob.GobDecoder interface, which +// reverses the encoding performed by GobEncode to allow types to be recovered +// from gob buffers. +func (t *Type) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gt gobType + err := dec.Decode(>) + if err != nil { + return fmt.Errorf("error decoding cty.Type: %s", err) + } + if gt.Version != 0 { + return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version) + } + + t.typeImpl = gt.Impl + + return nil +} + +// Capsule types cannot currently be gob-encoded, because they rely on pointer +// equality and we have no way to recover the original pointer on decode. 
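
// A sketch of the round trip the GobEncode/GobDecode methods above enable.
// Illustrative standalone program; it assumes the library's gob registration
// of its internal types (done elsewhere in the cty package, outside this
// excerpt) is in effect:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	in := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("example"),
		"count": cty.NumberIntVal(3),
	})

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}

	var out cty.Value
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}

	fmt.Println(out.Equals(in).True()) // true
}
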
+func (t *capsuleType) GobEncode() ([]byte, error) { + return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName)) +} + +func (t *capsuleType) GobDecode() ([]byte, error) { + return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName)) +} + +type gobValue struct { + Version int + Ty Type + V interface{} +} + +type gobType struct { + Version int + Impl typeImpl +} + +type gobCapsuleTypeImpl struct { +} diff --git a/vendor/github.com/zclconf/go-cty/cty/helper.go b/vendor/github.com/zclconf/go-cty/cty/helper.go new file mode 100644 index 0000000..1b88e9f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. +func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) { + hasDynamic := false + hasUnknown := false + + for i, val := range values { + if val.ty == DynamicPseudoType { + hasDynamic = true + continue + } + + if !val.Type().Equals(required) { + return nil, fmt.Errorf( + "type mismatch: want %s but value %d is %s", + required.FriendlyName(), + i, val.ty.FriendlyName(), + ) + } + + if val.v == unknown { + hasUnknown = true + } + } + + if hasDynamic { + return &DynamicVal, nil + } + + if hasUnknown { + ret := UnknownVal(ret) + return &ret, nil + } + + return nil, nil +} + +// mustTypeCheck is a wrapper around typeCheck that immediately panics if +// any error is returned. +func mustTypeCheck(required Type, ret Type, values ...Value) *Value { + shortCircuit, err := typeCheck(required, ret, values...) + if err != nil { + panic(err) + } + return shortCircuit +} + +// shortCircuitForceType takes the return value from mustTypeCheck and +// replaces it with an unknown of the given type if the original value was +// DynamicVal. +// +// This is useful for operations that are specified to always return a +// particular type, since then a dynamic result can safely be "upgrade" to +// a strongly-typed unknown, which then allows subsequent operations to +// be actually type-checked. +// +// It is safe to use this only if the operation in question is defined as +// returning either a value of the given type or panicking, since we know +// then that subsequent operations won't run if the operation panics. +// +// If the given short-circuit value is *not* DynamicVal then it must be +// of the given type, or this function will panic. 
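
// The short-circuit behavior implemented by the helpers above is visible
// through the public operation methods. Illustrative standalone program;
// Value.Add is part of the library but defined outside this excerpt:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// An unknown operand short-circuits to an unknown result of the
	// operation's return type.
	sum := cty.UnknownVal(cty.Number).Add(cty.NumberIntVal(1))
	fmt.Println(sum.IsKnown(), sum.Type().FriendlyName()) // false number

	// A DynamicVal operand also short-circuits; forceShortCircuitType then
	// upgrades the dynamic placeholder to an unknown of the known result type.
	sum = cty.DynamicVal.Add(cty.NumberIntVal(1))
	fmt.Println(sum.IsKnown(), sum.Type().FriendlyName()) // false number
}
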
+func forceShortCircuitType(shortCircuit *Value, ty Type) *Value { + if shortCircuit == nil { + return nil + } + + if shortCircuit.ty == DynamicPseudoType { + ret := UnknownVal(ty) + return &ret + } + + if !shortCircuit.ty.Equals(ty) { + panic("forceShortCircuitType got value of wrong type") + } + + return shortCircuit +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json.go b/vendor/github.com/zclconf/go-cty/cty/json.go new file mode 100644 index 0000000..c421a62 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json.go @@ -0,0 +1,176 @@ +package cty + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// MarshalJSON is an implementation of json.Marshaler that allows Type +// instances to be serialized as JSON. +// +// All standard types can be serialized, but capsule types cannot since there +// is no way to automatically recover the original pointer and capsule types +// compare by equality. +func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. 
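
// A sketch of the type serialization implemented above. Illustrative
// standalone program, not part of the vendored sources:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"tags": cty.List(cty.String),
	})

	b, err := json.Marshal(ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // ["object",{"name":"string","tags":["list","string"]}]

	var back cty.Type
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Equals(ty)) // true
}
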
+func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/list_type.go b/vendor/github.com/zclconf/go-cty/cty/list_type.go new file mode 100644 index 0000000..2ef02a1 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/list_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// TypeList instances represent specific list types. Each distinct ElementType +// creates a distinct, non-equal list type. +type typeList struct { + typeImplSigil + ElementTypeT Type +} + +// List creates a map type with the given element Type. +// +// List types are CollectionType implementations. +func List(elem Type) Type { + return Type{ + typeList{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a list whose element type is +// equal to that of the receiver. +func (t typeList) Equals(other Type) bool { + ot, isList := other.typeImpl.(typeList) + if !isList { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeList) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "list of " + elemName +} + +func (t typeList) ElementType() Type { + return t.ElementTypeT +} + +func (t typeList) GoString() string { + return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT) +} + +// IsListType returns true if the given type is a list type, regardless of its +// element type. +func (t Type) IsListType() bool { + _, ok := t.typeImpl.(typeList) + return ok +} + +// ListElementType is a convenience method that checks if the given type is +// a list type, returning a pointer to its element type if so and nil +// otherwise. 
This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.ListElementType(); et != nil { +// // Do something with *et +// } +func (t Type) ListElementType() *Type { + if lt, ok := t.typeImpl.(typeList); ok { + return <.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/map_type.go b/vendor/github.com/zclconf/go-cty/cty/map_type.go new file mode 100644 index 0000000..82d36c6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/map_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// TypeList instances represent specific list types. Each distinct ElementType +// creates a distinct, non-equal list type. +type typeMap struct { + typeImplSigil + ElementTypeT Type +} + +// Map creates a map type with the given element Type. +// +// Map types are CollectionType implementations. +func Map(elem Type) Type { + return Type{ + typeMap{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a map whose element type is +// equal to that of the receiver. +func (t typeMap) Equals(other Type) bool { + ot, isMap := other.typeImpl.(typeMap) + if !isMap { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "map of " + elemName +} + +func (t typeMap) ElementType() Type { + return t.ElementTypeT +} + +func (t typeMap) GoString() string { + return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT) +} + +// IsMapType returns true if the given type is a list type, regardless of its +// element type. +func (t Type) IsMapType() bool { + _, ok := t.typeImpl.(typeMap) + return ok +} + +// MapElementType is a convenience method that checks if the given type is +// a map type, returning a pointer to its element type if so and nil +// otherwise. This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.MapElementType(); et != nil { +// // Do something with *et +// } +func (t Type) MapElementType() *Type { + if lt, ok := t.typeImpl.(typeMap); ok { + return <.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go new file mode 100644 index 0000000..1eb99f2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go @@ -0,0 +1,14 @@ +// Package msgpack provides functions for serializing cty values in the +// msgpack encoding, and decoding them again. +// +// If the same type information is provided both at encoding and decoding time +// then values can be round-tripped without loss, except for capsule types +// which are not currently supported. +// +// If any unknown values are passed to Marshal then they will be represented +// using a msgpack extension with type code zero, which is understood by +// the Unmarshal function within this package but will not be understood by +// a generic (non-cty-aware) msgpack decoder. Ensure that no unknown values +// are used if interoperability with other msgpack implementations is +// required. 
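
// A sketch of the round-tripping behavior described above, with the same
// explicit type information supplied on both sides. Illustrative standalone
// program, not part of the vendored sources:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"count": cty.Number,
	})
	in := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("example"),
		"count": cty.NumberIntVal(3),
	})

	b, err := msgpack.Marshal(in, ty)
	if err != nil {
		panic(err)
	}

	out, err := msgpack.Unmarshal(b, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Equals(in).True()) // true
}
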
+package msgpack diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go new file mode 100644 index 0000000..1b631d0 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go @@ -0,0 +1,31 @@ +package msgpack + +import ( + "bytes" + + "github.com/vmihailenco/msgpack" + "github.com/zclconf/go-cty/cty" +) + +type dynamicVal struct { + Value cty.Value + Path cty.Path +} + +func (dv *dynamicVal) MarshalMsgpack() ([]byte, error) { + // Rather than defining a msgpack-specific serialization of types, + // instead we use the existing JSON serialization. + typeJSON, err := dv.Value.Type().MarshalJSON() + if err != nil { + return nil, dv.Path.NewErrorf("failed to serialize type: %s", err) + } + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + enc.EncodeArrayLen(2) + enc.EncodeBytes(typeJSON) + err = marshal(dv.Value, dv.Value.Type(), dv.Path, enc) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go new file mode 100644 index 0000000..6db0815 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go @@ -0,0 +1,8 @@ +package msgpack + +import ( + "math" +) + +var negativeInfinity = math.Inf(-1) +var positiveInfinity = math.Inf(1) diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go new file mode 100644 index 0000000..87b096c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go @@ -0,0 +1,207 @@ +package msgpack + +import ( + "bytes" + "math/big" + "sort" + + "github.com/vmihailenco/msgpack" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Marshal produces a msgpack serialization of the given value that +// can be decoded into the given type later using Unmarshal. +// +// The given value must conform to the given type, or an error will +// be returned. +func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(ty) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, ty) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + var path cty.Path + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + + err := marshal(val, ty, path, enc) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, enc) + } + + if !val.IsKnown() { + err := enc.Encode(unknownVal) + if err != nil { + return path.NewError(err) + } + return nil + } + if val.IsNull() { + err := enc.EncodeNil() + if err != nil { + return path.NewError(err) + } + return nil + } + + // The caller should've guaranteed that the given val is conformant with + // the given type ty, so we'll proceed under that assumption here. 
+ switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + err := enc.EncodeString(val.AsString()) + if err != nil { + return path.NewError(err) + } + return nil + case cty.Number: + var err error + switch { + case val.RawEquals(cty.PositiveInfinity): + err = enc.EncodeFloat64(positiveInfinity) + case val.RawEquals(cty.NegativeInfinity): + err = enc.EncodeFloat64(negativeInfinity) + default: + bf := val.AsBigFloat() + if iv, acc := bf.Int64(); acc == big.Exact { + err = enc.EncodeInt(iv) + } else if fv, acc := bf.Float64(); acc == big.Exact { + err = enc.EncodeFloat64(fv) + } else { + err = enc.EncodeString(bf.Text('f', -1)) + } + } + if err != nil { + return path.NewError(err) + } + return nil + case cty.Bool: + err := enc.EncodeBool(val.True()) + if err != nil { + return path.NewError(err) + } + return nil + default: + panic("unsupported primitive type") + } + case ty.IsListType(), ty.IsSetType(): + enc.EncodeArrayLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsMapType(): + enc.EncodeMapLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, enc) + if err != nil { + return err + } + err = marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + enc.EncodeArrayLen(len(etys)) + for it.Next() { + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + i++ + } + return nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + enc.EncodeMapLen(len(names)) + + for _, k := range names { + aty := atys[k] + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, enc) + if err != nil { + return err + } + err = marshal(av, aty, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsCapsuleType(): + return path.NewErrorf("capsule types not supported for msgpack encoding") + default: + // should never happen + return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
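
// When the target type is cty.DynamicPseudoType, the wrapper written by
// marshalDynamic lets the concrete type survive the round trip. Illustrative
// standalone program, not part of the vendored sources:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	in := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})

	// The buffer holds a two-element msgpack array: the JSON-serialized type
	// ["list","string"] followed by the value itself.
	b, err := msgpack.Marshal(in, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}

	out, err := msgpack.Unmarshal(b, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Type().FriendlyName()) // list of string
	fmt.Println(out.Equals(in).True())     // true
}
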
+func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { + dv := dynamicVal{ + Value: val, + Path: path, + } + return enc.Encode(&dv) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go new file mode 100644 index 0000000..6f6022e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "bytes" + "fmt" + "io" + + "github.com/vmihailenco/msgpack" + msgpackcodes "github.com/vmihailenco/msgpack/codes" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// msgpack-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary msgpack without explicit cty Type +// information. +// +// The rules are as follows: +// +// msgpack strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// msgpack maps become cty object types, with the attributes defined by the +// map keys and the types of their values. +// +// msgpack arrays become cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any unknown values are similarly typed as DynamicPseudoType, because these +// do not carry type information on the wire. +// +// Any parse errors will be returned as an error, and the type will be the +// invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := msgpack.NewDecoder(r) + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + // We must now be at the end of the buffer + err = dec.Skip() + if err != io.EOF { + return ty, fmt.Errorf("extra bytes after msgpack value") + } + + return ty, nil +} + +func impliedType(dec *msgpack.Decoder) (cty.Type, error) { + // If this function returns with a nil error then it must have already + // consumed the next value from the decoder, since when called recursively + // the caller will be expecting to find a following value here. 
+ + code, err := dec.PeekCode() + if err != nil { + return cty.NilType, err + } + + switch { + + case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): + err := dec.Skip() + return cty.DynamicPseudoType, err + + case code == msgpackcodes.True || code == msgpackcodes.False: + _, err := dec.DecodeBool() + return cty.Bool, err + + case msgpackcodes.IsFixedNum(code): + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: + _, err := dec.DecodeUint64() + return cty.Number, err + + case code == msgpackcodes.Float || code == msgpackcodes.Double: + _, err := dec.DecodeFloat64() + return cty.Number, err + + case msgpackcodes.IsString(code): + _, err := dec.DecodeString() + return cty.String, err + + case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: + return impliedObjectType(dec) + + case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: + return impliedTupleType(dec) + + default: + return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) + } +} + +func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of map. + l, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + var atys map[string]cty.Type + + for i := 0; i < l; i++ { + // Read the map key first. We require maps to be strings, but msgpack + // doesn't so we're prepared to error here if not. + k, err := dec.DecodeString() + if err != nil { + return cty.DynamicPseudoType, err + } + + aty, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[k] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of array. + l, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + if l == 0 { + return cty.EmptyTuple, nil + } + + etys := make([]cty.Type, l) + + for i := 0; i < l; i++ { + ety, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + etys[i] = ety + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go new file mode 100644 index 0000000..6507bc4 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go @@ -0,0 +1,16 @@ +package msgpack + +type unknownType struct{} + +var unknownVal = unknownType{} + +// unknownValBytes is the raw bytes of the msgpack fixext1 value we +// write to represent an unknown value. It's an extension value of +// type zero whose value is irrelevant. Since it's irrelevant, we +// set it to a single byte whose value is also zero, since that's +// the most compact possible representation. 
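
// A sketch of the default type mapping implemented above: msgpack maps imply
// object types and msgpack arrays imply tuple types, so the implied type need
// not match the type used for encoding. Illustrative standalone program, not
// part of the vendored sources:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"ports": cty.List(cty.Number),
	})
	val := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("web"),
		"ports": cty.ListVal([]cty.Value{cty.NumberIntVal(80)}),
	})

	b, err := msgpack.Marshal(val, ty)
	if err != nil {
		panic(err)
	}

	implied, err := msgpack.ImpliedType(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(implied.IsObjectType())                       // true
	fmt.Println(implied.AttributeType("ports").IsTupleType()) // true: the list came back as a tuple
}
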
+var unknownValBytes = []byte{0xd4, 0, 0} + +func (uv unknownType) MarshalMsgpack() ([]byte, error) { + return unknownValBytes, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go new file mode 100644 index 0000000..450804f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go @@ -0,0 +1,311 @@ +package msgpack + +import ( + "bytes" + "math/big" + + "github.com/vmihailenco/msgpack" + msgpackCodes "github.com/vmihailenco/msgpack/codes" + "github.com/zclconf/go-cty/cty" +) + +// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of +// the given type, returning the result. +// +// If an error is returned, the error is written with a hypothetical +// end-user that wrote the msgpack file as its audience, using cty type +// system concepts rather than Go type system concepts. +func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { + r := bytes.NewReader(b) + dec := msgpack.NewDecoder(r) + + var path cty.Path + return unmarshal(dec, ty, path) +} + +func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + if msgpackCodes.IsExt(peek) { + // We just assume _all_ extensions are unknown values, + // since we don't have any other extensions. + return cty.UnknownVal(ty), nil + } + if ty == cty.DynamicPseudoType { + return unmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + return cty.NullVal(ty), nil + } + + switch { + case ty.IsPrimitiveType(): + val, err := unmarshalPrimitive(dec, ty, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case ty.IsListType(): + return unmarshalList(dec, ty.ElementType(), path) + case ty.IsSetType(): + return unmarshalSet(dec, ty.ElementType(), path) + case ty.IsMapType(): + return unmarshalMap(dec, ty.ElementType(), path) + case ty.IsTupleType(): + return unmarshalTuple(dec, ty.TupleElementTypes(), path) + case ty.IsObjectType(): + return unmarshalObject(dec, ty.AttributeTypes(), path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) + } +} + +func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + switch ty { + case cty.Bool: + rv, err := dec.DecodeBool() + if err != nil { + return cty.DynamicVal, path.NewErrorf("bool is required") + } + return cty.BoolVal(rv), nil + case cty.Number: + // Marshal will try int and float first, if the value can be + // losslessly represented in these encodings, and then fall + // back on a string if the number is too large or too precise. 
+ peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + + if msgpackCodes.IsFixedNum(peek) { + rv, err := dec.DecodeInt64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberIntVal(rv), nil + } + + switch peek { + case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: + rv, err := dec.DecodeInt64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberIntVal(rv), nil + case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: + rv, err := dec.DecodeUint64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberUIntVal(rv), nil + case msgpackCodes.Float, msgpackCodes.Double: + rv, err := dec.DecodeFloat64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberFloatVal(rv), nil + default: + rv, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + bf := &big.Float{} + _, _, err = bf.Parse(rv, 10) + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberVal(bf), nil + } + case cty.String: + rv, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path.NewErrorf("string is required") + } + return cty.StringVal(rv), nil + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a list is required") + } + + if length == 0 { + return cty.ListValEmpty(ety), nil + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a set is required") + } + + if length == 0 { + return cty.SetValEmpty(ety), nil + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a map is required") + } + + if length == 0 { + return cty.MapValEmpty(ety), nil + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + path[:len(path)-1].NewErrorf("non-string key in map") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, 
path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a tuple is required") + } + + if length != len(etys) { + return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + ety := etys[i] + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("an object is required") + } + + if length != len(atys) { + return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required", len(atys)) + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + aty, exists := atys[key] + if !exists { + return cty.DynamicVal, path.NewErrorf("unsupported attribute") + } + + val, err := unmarshal(dec, aty, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + if length != 2 { + return cty.DynamicVal, path.NewErrorf( + "dynamic value array must have exactly two elements", + ) + } + + typeJSON, err := dec.DecodeBytes() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + var ty cty.Type + err = (&ty).UnmarshalJSON(typeJSON) + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + return unmarshal(dec, ty, path) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/null.go b/vendor/github.com/zclconf/go-cty/cty/null.go new file mode 100644 index 0000000..d58d028 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/null.go @@ -0,0 +1,14 @@ +package cty + +// NullVal returns a null value of the given type. A null can be created of any +// type, but operations on such values will always panic. Calling applications +// are encouraged to use nulls only sparingly, particularly when user-provided +// expressions are to be evaluated, since the precence of nulls creates a +// much higher chance of evaluation errors that can't be caught by a type +// checker. +func NullVal(t Type) Value { + return Value{ + ty: t, + v: nil, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/object_type.go b/vendor/github.com/zclconf/go-cty/cty/object_type.go new file mode 100644 index 0000000..187d387 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/object_type.go @@ -0,0 +1,135 @@ +package cty + +import ( + "fmt" +) + +type typeObject struct { + typeImplSigil + AttrTypes map[string]Type +} + +// Object creates an object type with the given attribute types. +// +// After a map is passed to this function the caller must no longer access it, +// since ownership is transferred to this library. 
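
// A sketch of constructing and inspecting an object type. Illustrative
// standalone program, not part of the vendored sources:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":    cty.String,
		"enabled": cty.Bool,
	})

	fmt.Println(ty.IsObjectType())                            // true
	fmt.Println(ty.HasAttribute("name"))                      // true
	fmt.Println(ty.HasAttribute("missing"))                   // false
	fmt.Println(ty.AttributeType("enabled").Equals(cty.Bool)) // true
}
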
+func Object(attrTypes map[string]Type) Type { + attrTypesNorm := make(map[string]Type, len(attrTypes)) + for k, v := range attrTypes { + attrTypesNorm[NormalizeString(k)] = v + } + + return Type{ + typeObject{ + AttrTypes: attrTypesNorm, + }, + } +} + +func (t typeObject) Equals(other Type) bool { + if ot, ok := other.typeImpl.(typeObject); ok { + if len(t.AttrTypes) != len(ot.AttrTypes) { + // Fast path: if we don't have the same number of attributes + // then we can't possibly be equal. This also avoids the need + // to test attributes in both directions below, since we know + // there can't be extras in "other". + return false + } + + for attr, ty := range t.AttrTypes { + oty, ok := ot.AttrTypes[attr] + if !ok { + return false + } + if !oty.Equals(ty) { + return false + } + } + + return true + } + return false +} + +func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string { + // There isn't really a friendly way to write an object type due to its + // complexity, so we'll just do something English-ish. Callers will + // probably want to make some extra effort to avoid ever printing out + // an object type FriendlyName in its entirety. For example, could + // produce an error message by diffing two object types and saying + // something like "Expected attribute foo to be string, but got number". + // TODO: Finish this + return "object" +} + +func (t typeObject) GoString() string { + if len(t.AttrTypes) == 0 { + return "cty.EmptyObject" + } + return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes) +} + +// EmptyObject is a shorthand for Object(map[string]Type{}), to more +// easily talk about the empty object type. +var EmptyObject Type + +// EmptyObjectVal is the only possible non-null, non-unknown value of type +// EmptyObject. +var EmptyObjectVal Value + +func init() { + EmptyObject = Object(map[string]Type{}) + EmptyObjectVal = Value{ + ty: EmptyObject, + v: map[string]interface{}{}, + } +} + +// IsObjectType returns true if the given type is an object type, regardless +// of its element type. +func (t Type) IsObjectType() bool { + _, ok := t.typeImpl.(typeObject) + return ok +} + +// HasAttribute returns true if the receiver has an attribute with the given +// name, regardless of its type. Will panic if the reciever isn't an object +// type; use IsObjectType to determine whether this operation will succeed. +func (t Type) HasAttribute(name string) bool { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + _, hasAttr := ot.AttrTypes[name] + return hasAttr + } + panic("HasAttribute on non-object Type") +} + +// AttributeType returns the type of the attribute with the given name. Will +// panic if the receiver is not an object type (use IsObjectType to confirm) +// or if the object type has no such attribute (use HasAttribute to confirm). +func (t Type) AttributeType(name string) Type { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + aty, hasAttr := ot.AttrTypes[name] + if !hasAttr { + panic("no such attribute") + } + return aty + } + panic("AttributeType on non-object Type") +} + +// AttributeTypes returns a map from attribute names to their associated +// types. Will panic if the receiver is not an object type (use IsObjectType +// to confirm). +// +// The returned map is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the returned +// map. For many purposes the attribute-related methods of Value are more +// appropriate and more convenient to use. 
+func (t Type) AttributeTypes() map[string]Type { + if ot, ok := t.typeImpl.(typeObject); ok { + return ot.AttrTypes + } + panic("AttributeTypes on non-object Type") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go new file mode 100644 index 0000000..84a9de0 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/path.go @@ -0,0 +1,186 @@ +package cty + +import ( + "errors" + "fmt" +) + +// A Path is a sequence of operations to locate a nested value within a +// data structure. +// +// The empty Path represents the given item. Any PathSteps within represent +// taking a single step down into a data structure. +// +// Path has some convenience methods for gradually constructing a path, +// but callers can also feel free to just produce a slice of PathStep manually +// and convert to this type, which may be more appropriate in environments +// where memory pressure is a concern. +type Path []PathStep + +// PathStep represents a single step down into a data structure, as part +// of a Path. PathStep is a closed interface, meaning that the only +// permitted implementations are those within this package. +type PathStep interface { + pathStepSigil() pathStepImpl + Apply(Value) (Value, error) +} + +// embed pathImpl into a struct to declare it a PathStep implementation +type pathStepImpl struct{} + +func (p pathStepImpl) pathStepSigil() pathStepImpl { + return p +} + +// Index returns a new Path that is the reciever with an IndexStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) Index(v Value) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = IndexStep{ + Key: v, + } + return ret +} + +// GetAttr returns a new Path that is the reciever with a GetAttrStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) GetAttr(name string) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = GetAttrStep{ + Name: name, + } + return ret +} + +// Apply applies each of the steps in turn to successive values starting with +// the given value, and returns the result. If any step returns an error, +// the whole operation returns an error. +func (p Path) Apply(val Value) (Value, error) { + var err error + for i, step := range p { + val, err = step.Apply(val) + if err != nil { + return NilVal, fmt.Errorf("at step %d: %s", i, err) + } + } + return val, nil +} + +// LastStep applies the given path up to the last step and then returns +// the resulting value and the final step. +// +// This is useful when dealing with assignment operations, since in that +// case the *value* of the last step is not important (and may not, in fact, +// present at all) and we care only about its location. +// +// Since LastStep applies all steps except the last, it will return errors +// for those steps in the same way as Apply does. +// +// If the path has *no* steps then the returned PathStep will be nil, +// representing that any operation should be applied directly to the +// given value. 
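
A sketch of the Path-building helpers described above: construct a path equivalent to .items[1] and walk a value with Apply (the value and its "items" attribute are hypothetical):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        // Hypothetical value to traverse.
        val := cty.ObjectVal(map[string]cty.Value{
            "items": cty.ListVal([]cty.Value{
                cty.StringVal("a"),
                cty.StringVal("b"),
            }),
        })

        // GetAttr and Index each copy the path, so p itself stays reusable.
        p := cty.Path{}.GetAttr("items").Index(cty.NumberIntVal(1))

        got, err := p.Apply(val)
        if err != nil {
            panic(err)
        }
        fmt.Println(got.AsString()) // b
    }
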
+func (p Path) LastStep(val Value) (Value, PathStep, error) { + var err error + + if len(p) == 0 { + return val, nil, nil + } + + journey := p[:len(p)-1] + val, err = journey.Apply(val) + if err != nil { + return NilVal, nil, err + } + return val, p[len(p)-1], nil +} + +// Copy makes a shallow copy of the receiver. Often when paths are passed to +// caller code they come with the constraint that they are valid only until +// the caller returns, due to how they are constructed internally. Callers +// can use Copy to conveniently produce a copy of the value that _they_ control +// the validity of. +func (p Path) Copy() Path { + ret := make(Path, len(p)) + copy(ret, p) + return ret +} + +// IndexStep is a Step implementation representing applying the index operation +// to a value, which must be of either a list, map, or set type. +// +// When describing a path through a *type* rather than a concrete value, +// the Key may be an unknown value, indicating that the step applies to +// *any* key of the given type. +// +// When indexing into a set, the Key is actually the element being accessed +// itself, since in sets elements are their own identity. +type IndexStep struct { + pathStepImpl + Key Value +} + +// Apply returns the value resulting from indexing the given value with +// our key value. +func (s IndexStep) Apply(val Value) (Value, error) { + switch s.Key.Type() { + case Number: + if !val.Type().IsListType() { + return NilVal, errors.New("not a list type") + } + case String: + if !val.Type().IsMapType() { + return NilVal, errors.New("not a map type") + } + default: + return NilVal, errors.New("key value not number or string") + } + + has := val.HasIndex(s.Key) + if !has.IsKnown() { + return UnknownVal(val.Type().ElementType()), nil + } + if !has.True() { + return NilVal, errors.New("value does not have given index key") + } + + return val.Index(s.Key), nil +} + +func (s IndexStep) GoString() string { + return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key) +} + +// GetAttrStep is a Step implementation representing retrieving an attribute +// from a value, which must be of an object type. +type GetAttrStep struct { + pathStepImpl + Name string +} + +// Apply returns the value of our named attribute from the given value, which +// must be of an object type that has a value of that name. +func (s GetAttrStep) Apply(val Value) (Value, error) { + if !val.Type().IsObjectType() { + return NilVal, errors.New("not an object type") + } + + if !val.Type().HasAttribute(s.Name) { + return NilVal, fmt.Errorf("object has no attribute %q", s.Name) + } + + return val.GetAttr(s.Name), nil +} + +func (s GetAttrStep) GoString() string { + return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go new file mode 100644 index 0000000..7b3d119 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -0,0 +1,122 @@ +package cty + +import "math/big" + +// primitiveType is the hidden implementation of the various primitive types +// that are exposed as variables in this package. 
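
A single step can also be applied on its own, without wrapping it in a Path; a brief sketch using GetAttrStep (the "enabled" attribute is hypothetical):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        val := cty.ObjectVal(map[string]cty.Value{
            "enabled": cty.True, // hypothetical attribute
        })

        step := cty.GetAttrStep{Name: "enabled"}
        got, err := step.Apply(val)
        if err != nil {
            panic(err)
        }
        fmt.Println(got.True()) // true
    }
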
+type primitiveType struct { + typeImplSigil + Kind primitiveTypeKind +} + +type primitiveTypeKind byte + +const ( + primitiveTypeBool primitiveTypeKind = 'B' + primitiveTypeNumber primitiveTypeKind = 'N' + primitiveTypeString primitiveTypeKind = 'S' +) + +func (t primitiveType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(primitiveType); ok { + return otherP.Kind == t.Kind + } + return false +} + +func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string { + switch t.Kind { + case primitiveTypeBool: + return "bool" + case primitiveTypeNumber: + return "number" + case primitiveTypeString: + return "string" + default: + // should never happen + panic("invalid primitive type") + } +} + +func (t primitiveType) GoString() string { + switch t.Kind { + case primitiveTypeBool: + return "cty.Bool" + case primitiveTypeNumber: + return "cty.Number" + case primitiveTypeString: + return "cty.String" + default: + // should never happen + panic("invalid primitive type") + } +} + +// Number is the numeric type. Number values are arbitrary-precision +// decimal numbers, which can then be converted into Go's various numeric +// types only if they are in the appropriate range. +var Number Type + +// String is the string type. String values are sequences of unicode codepoints +// encoded internally as UTF-8. +var String Type + +// Bool is the boolean type. The two values of this type are True and False. +var Bool Type + +// True is the truthy value of type Bool +var True Value + +// False is the falsey value of type Bool +var False Value + +// Zero is a number value representing exactly zero. +var Zero Value + +// PositiveInfinity is a Number value representing positive infinity +var PositiveInfinity Value + +// NegativeInfinity is a Number value representing negative infinity +var NegativeInfinity Value + +func init() { + Number = Type{ + primitiveType{Kind: primitiveTypeNumber}, + } + String = Type{ + primitiveType{Kind: primitiveTypeString}, + } + Bool = Type{ + primitiveType{Kind: primitiveTypeBool}, + } + True = Value{ + ty: Bool, + v: true, + } + False = Value{ + ty: Bool, + v: false, + } + Zero = Value{ + ty: Number, + v: big.NewFloat(0), + } + PositiveInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(false), + } + NegativeInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(true), + } +} + +// IsPrimitiveType returns true if and only if the reciever is a primitive +// type, which means it's either number, string, or bool. Any two primitive +// types can be safely compared for equality using the standard == operator +// without panic, which is not a guarantee that holds for all types. Primitive +// types can therefore also be used in switch statements. +func (t Type) IsPrimitiveType() bool { + _, ok := t.typeImpl.(primitiveType) + return ok +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/gob.go b/vendor/github.com/zclconf/go-cty/cty/set/gob.go new file mode 100644 index 0000000..da2978f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/gob.go @@ -0,0 +1,76 @@ +package set + +import ( + "bytes" + "encoding/gob" + "fmt" +) + +// GobEncode is an implementation of the interface gob.GobEncoder, allowing +// sets to be included in structures encoded via gob. +// +// The set rules are included in the serialized value, so the caller must +// register its concrete rules type with gob.Register before using a +// set in a gob, and possibly also implement GobEncode/GobDecode to customize +// how any parameters are persisted. 
+// +// The set elements are also included, so if they are of non-primitive types +// they too must be registered with gob. +// +// If the produced gob values will persist for a long time, the caller must +// ensure compatibility of the rules implementation. In particular, if the +// definition of element equivalence changes between encoding and decoding +// then two distinct stored elements may be considered equivalent on decoding, +// causing the recovered set to have fewer elements than when it was stored. +func (s Set) GobEncode() ([]byte, error) { + gs := gobSet{ + Version: 0, + Rules: s.rules, + Values: s.Values(), + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + err := enc.Encode(gs) + if err != nil { + return nil, fmt.Errorf("error encoding set.Set: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is the opposite of GobEncode. See GobEncode for information +// on the requirements for and caveats of including set values in gobs. +func (s *Set) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gs gobSet + err := dec.Decode(&gs) + if err != nil { + return fmt.Errorf("error decoding set.Set: %s", err) + } + if gs.Version != 0 { + return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version) + } + + victim := NewSetFromSlice(gs.Rules, gs.Values) + s.vals = victim.vals + s.rules = victim.rules + return nil +} + +type gobSet struct { + Version int + Rules Rules + + // The bucket-based representation is for efficient in-memory access, but + // for serialization it's enough to just retain the values themselves, + // which we can re-bucket using the rules (which may have changed!) when + // we re-inflate. + Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/iterator.go b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go new file mode 100644 index 0000000..f15498e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go @@ -0,0 +1,36 @@ +package set + +type Iterator struct { + bucketIds []int + vals map[int][]interface{} + bucketIdx int + valIdx int +} + +func (it *Iterator) Value() interface{} { + return it.currentBucket()[it.valIdx] +} + +func (it *Iterator) Next() bool { + if it.bucketIdx == -1 { + // init + if len(it.bucketIds) == 0 { + return false + } + + it.valIdx = 0 + it.bucketIdx = 0 + return true + } + + it.valIdx++ + if it.valIdx >= len(it.currentBucket()) { + it.valIdx = 0 + it.bucketIdx++ + } + return it.bucketIdx < len(it.bucketIds) +} + +func (it *Iterator) currentBucket() []interface{} { + return it.vals[it.bucketIds[it.bucketIdx]] +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/ops.go b/vendor/github.com/zclconf/go-cty/cty/set/ops.go new file mode 100644 index 0000000..726e707 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/ops.go @@ -0,0 +1,199 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. 
If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s Set) Copy() Set { + ret := NewSet(s.rules) + for k, v := range s.vals { + ret.vals[k] = v + } + return ret +} + +// Iterator returns an iterator over values in the set, in an undefined order +// that callers should not depend on. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. + bucketIds := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIds = append(bucketIds, id) + } + sort.Ints(bucketIds) + + return &Iterator{ + bucketIds: bucketIds, + vals: s.vals, + bucketIdx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all of the values in the set in no particular +// order. This is just a wrapper around EachValue that accumulates the results +// in a slice for caller convenience. +// +// The returned slice will be nil if there are no values in the set. +func (s Set) Values() []interface{} { + var ret []interface{} + s.EachValue(func(v interface{}) { + ret = append(ret, v) + }) + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. 
Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. +func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/rules.go b/vendor/github.com/zclconf/go-cty/cty/set/rules.go new file mode 100644 index 0000000..7200184 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/rules.go @@ -0,0 +1,25 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. + Hash(interface{}) int + + // Equivalent returns true if and only if the two values are considered + // equivalent for the sake of set membership. Two values that are + // equivalent cannot exist in the set at the same time, and if two + // equivalent values are added it is undefined which one will be + // returned when enumerating all of the set members. + // + // Two values that are equivalent *must* result in the same hash value, + // though it is *not* required that two values with the same hash value + // be equivalent. + Equivalent(interface{}, interface{}) bool +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/set.go b/vendor/github.com/zclconf/go-cty/cty/set/set.go new file mode 100644 index 0000000..b4fb316 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/set.go @@ -0,0 +1,62 @@ +package set + +import ( + "fmt" +) + +// Set is an implementation of the concept of a set: a collection where all +// values are conceptually either in or out of the set, but the members are +// not ordered. +// +// This type primarily exists to be the internal type of sets in cty, but +// it is considered to be at the same level of abstraction as Go's built in +// slice and map collection types, and so should make no cty-specific +// assumptions. +// +// Set operations are not thread safe. It is the caller's responsibility to +// provide mutex guarantees where necessary. +// +// Set operations are not optimized to minimize memory pressure. Mutating +// a set will generally create garbage and so should perhaps be avoided in +// tight loops where memory pressure is a concern. +type Set struct { + vals map[int][]interface{} + rules Rules +} + +// NewSet returns an empty set with the membership rules given. 
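
The Rules interface above is the extension point for this package; a sketch of a hypothetical intRules implementation for plain Go ints, exercised through NewSet, Add, Has and EachValue:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty/set"
    )

    // intRules is a hypothetical Rules implementation for ints: the value is
    // its own hash, and equivalence is ordinary equality.
    type intRules struct{}

    func (intRules) Hash(v interface{}) int { return v.(int) }

    func (intRules) Equivalent(a, b interface{}) bool { return a == b }

    func main() {
        s := set.NewSet(intRules{})
        s.Add(1)
        s.Add(2)
        s.Add(2) // an equivalent value is already present, so this is a no-op

        fmt.Println(s.Length(), s.Has(2)) // 2 true

        s.EachValue(func(v interface{}) {
            fmt.Println(v)
        })
    }
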
+func NewSet(rules Rules) Set { + return Set{ + vals: map[int][]interface{}{}, + rules: rules, + } +} + +func NewSetFromSlice(rules Rules, vals []interface{}) Set { + s := NewSet(rules) + for _, v := range vals { + s.Add(v) + } + return s +} + +func sameRules(s1 Set, s2 Set) bool { + return s1.rules == s2.rules +} + +func mustHaveSameRules(s1 Set, s2 Set) { + if !sameRules(s1, s2) { + panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules)) + } +} + +// HasRules returns true if and only if the receiving set has the given rules +// instance as its rules. +func (s Set) HasRules(rules Rules) bool { + return s.rules == rules +} + +// Rules returns the receiving set's rules instance. +func (s Set) Rules() Rules { + return s.rules +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set_helper.go b/vendor/github.com/zclconf/go-cty/cty/set_helper.go new file mode 100644 index 0000000..a88ddaf --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set_helper.go @@ -0,0 +1,126 @@ +package cty + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/set" +) + +// ValueSet is to cty.Set what []cty.Value is to cty.List and +// map[string]cty.Value is to cty.Map. It's provided to allow callers a +// convenient interface for manipulating sets before wrapping them in cty.Set +// values using cty.SetValFromValueSet. +// +// Unlike value slices and value maps, ValueSet instances have a single +// homogenous element type because that is a requirement of the underlying +// set implementation, which uses the element type to select a suitable +// hashing function. +// +// Set mutations are not concurrency-safe. +type ValueSet struct { + // ValueSet is just a thin wrapper around a set.Set with our value-oriented + // "rules" applied. We do this so that the caller can work in terms of + // cty.Value objects even though the set internals use the raw values. + s set.Set +} + +// NewValueSet creates and returns a new ValueSet with the given element type. +func NewValueSet(ety Type) ValueSet { + return newValueSet(set.NewSet(setRules{Type: ety})) +} + +func newValueSet(s set.Set) ValueSet { + return ValueSet{ + s: s, + } +} + +// ElementType returns the element type for the receiving ValueSet. +func (s ValueSet) ElementType() Type { + return s.s.Rules().(setRules).Type +} + +// Add inserts the given value into the receiving set. +func (s ValueSet) Add(v Value) { + s.requireElementType(v) + s.s.Add(v.v) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s ValueSet) Remove(v Value) { + s.requireElementType(v) + s.s.Remove(v.v) +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s ValueSet) Has(v Value) bool { + s.requireElementType(v) + return s.s.Has(v.v) +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s ValueSet) Copy() ValueSet { + return newValueSet(s.s.Copy()) +} + +// Length returns the number of values in the set. +func (s ValueSet) Length() int { + return s.s.Length() +} + +// Values returns a slice of all of the values in the set in no particular +// order. 
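
At the cty level, a sketch of typical ValueSet usage: duplicates collapse on Add, and the result can be wrapped into a real set value with SetValFromValueSet (defined later in value_init.go); the element values are arbitrary examples:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        vs := cty.NewValueSet(cty.String)
        vs.Add(cty.StringVal("a"))
        vs.Add(cty.StringVal("b"))
        vs.Add(cty.StringVal("a")) // equivalent to an existing element

        fmt.Println(vs.Length()) // 2

        v := cty.SetValFromValueSet(vs)
        fmt.Println(v.Type().FriendlyName()) // set of string
    }
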
+func (s ValueSet) Values() []Value { + l := s.s.Length() + if l == 0 { + return nil + } + ret := make([]Value, 0, l) + ety := s.ElementType() + for it := s.s.Iterator(); it.Next(); { + ret = append(ret, Value{ + ty: ety, + v: it.Value(), + }) + } + return ret +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same element type, +// or else this function will panic. +func (s ValueSet) Union(other ValueSet) ValueSet { + return newValueSet(s.s.Union(other.s)) +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Intersection(other ValueSet) ValueSet { + return newValueSet(s.s.Intersection(other.s)) +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Subtract(other ValueSet) ValueSet { + return newValueSet(s.s.Subtract(other.s)) +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same element type, or else this function +// will panic. +func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet { + return newValueSet(s.s.SymmetricDifference(other.s)) +} + +// requireElementType panics if the given value is not of the set's element type. +func (s ValueSet) requireElementType(v Value) { + if !v.Type().Equals(s.ElementType()) { + panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType())) + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/vendor/github.com/zclconf/go-cty/cty/set_internals.go new file mode 100644 index 0000000..1d7a731 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set_internals.go @@ -0,0 +1,158 @@ +package cty + +import ( + "bytes" + "fmt" + "hash/crc32" + "math/big" + "sort" +) + +// setRules provides a Rules implementation for the ./set package that +// respects the equality rules for cty values of the given type. +// +// This implementation expects that values added to the set will be +// valid internal values for the given Type, which is to say that wrapping +// the given value in a Value struct along with the ruleset's type should +// produce a valid, working Value. +type setRules struct { + Type Type +} + +// Hash returns a hash value for the receiver that can be used for equality +// checks where some inaccuracy is tolerable. +// +// The hash function is value-type-specific, so it is not meaningful to compare +// hash results for values of different types. +// +// This function is not safe to use for security-related applications, since +// the hash used is not strong enough. +func (val Value) Hash() int { + hashBytes := makeSetHashBytes(val) + return int(crc32.ChecksumIEEE(hashBytes)) +} + +func (r setRules) Hash(v interface{}) int { + return Value{ + ty: r.Type, + v: v, + }.Hash() +} + +func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + eqv := v1v.Equals(v2v) + + // By comparing the result to true we ensure that an Unknown result, + // which will result if either value is unknown, will be considered + // as non-equivalent. 
Two unknown values are not equivalent for the + // sake of set membership. + return eqv.v == true +} + +func makeSetHashBytes(val Value) []byte { + var buf bytes.Buffer + appendSetHashBytes(val, &buf) + return buf.Bytes() +} + +func appendSetHashBytes(val Value, buf *bytes.Buffer) { + // Exactly what bytes we generate here don't matter as long as the following + // constraints hold: + // - Unknown and null values all generate distinct strings from + // each other and from any normal value of the given type. + // - The delimiter used to separate items in a compound structure can + // never appear literally in any of its elements. + // Since we don't support hetrogenous lists we don't need to worry about + // collisions between values of different types, apart from + // PseudoTypeDynamic. + // If in practice we *do* get a collision then it's not a big deal because + // the Equivalent function will still distinguish values, but set + // performance will be best if we are able to produce a distinct string + // for each distinct value, unknown values notwithstanding. + if !val.IsKnown() { + buf.WriteRune('?') + return + } + if val.IsNull() { + buf.WriteRune('~') + return + } + + switch val.ty { + case Number: + buf.WriteString(val.v.(*big.Float).String()) + return + case Bool: + if val.v.(bool) { + buf.WriteRune('T') + } else { + buf.WriteRune('F') + } + return + case String: + buf.WriteString(fmt.Sprintf("%q", val.v.(string))) + return + } + + if val.ty.IsMapType() { + buf.WriteRune('{') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(keyVal, buf) + buf.WriteRune(':') + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune('}') + return + } + + if val.ty.IsListType() || val.ty.IsSetType() { + buf.WriteRune('[') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune(']') + return + } + + if val.ty.IsObjectType() { + buf.WriteRune('<') + attrNames := make([]string, 0, len(val.ty.AttributeTypes())) + for attrName := range val.ty.AttributeTypes() { + attrNames = append(attrNames, attrName) + } + sort.Strings(attrNames) + for _, attrName := range attrNames { + appendSetHashBytes(val.GetAttr(attrName), buf) + buf.WriteRune(';') + } + buf.WriteRune('>') + return + } + + if val.ty.IsTupleType() { + buf.WriteRune('<') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune('>') + return + } + + // should never get down here + panic("unsupported type in set hash") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set_type.go b/vendor/github.com/zclconf/go-cty/cty/set_type.go new file mode 100644 index 0000000..cbc3706 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set_type.go @@ -0,0 +1,72 @@ +package cty + +import ( + "fmt" +) + +type typeSet struct { + typeImplSigil + ElementTypeT Type +} + +// Set creates a set type with the given element Type. +// +// Set types are CollectionType implementations. +func Set(elem Type) Type { + return Type{ + typeSet{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a set whose element type is +// equal to that of the receiver. 
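
A sketch of the contract documented above: equal values hash equally (collisions are permitted, but equivalence decides membership), and set types compare by element type; the example values are arbitrary:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        a := cty.StringVal("hello")
        b := cty.StringVal("hello")
        fmt.Println(a.Hash() == b.Hash()) // true: equal values, equal hashes

        // Set types are equal exactly when their element types are equal.
        fmt.Println(cty.Set(cty.String).Equals(cty.Set(cty.String))) // true
        fmt.Println(cty.Set(cty.String).Equals(cty.Set(cty.Number))) // false
    }
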
+func (t typeSet) Equals(other Type) bool {
+	ot, isSet := other.typeImpl.(typeSet)
+	if !isSet {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "set of " + elemName
+}
+
+func (t typeSet) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeSet) GoString() string {
+	return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
+}
+
+// IsSetType returns true if the given type is a set type, regardless of its
+// element type.
+func (t Type) IsSetType() bool {
+	_, ok := t.typeImpl.(typeSet)
+	return ok
+}
+
+// SetElementType is a convenience method that checks if the given type is
+// a set type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.SetElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) SetElementType() *Type {
+	if lt, ok := t.typeImpl.(typeSet); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/tuple_type.go b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
new file mode 100644
index 0000000..798cacd
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeTuple struct {
+	typeImplSigil
+	ElemTypes []Type
+}
+
+// Tuple creates a tuple type with the given element types.
+//
+// After a slice is passed to this function the caller must no longer access
+// the underlying array, since ownership is transferred to this library.
+func Tuple(elemTypes []Type) Type {
+	return Type{
+		typeTuple{
+			ElemTypes: elemTypes,
+		},
+	}
+}
+
+func (t typeTuple) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeTuple); ok {
+		if len(t.ElemTypes) != len(ot.ElemTypes) {
+			// Fast path: if we don't have the same number of elements
+			// then we can't possibly be equal.
+			return false
+		}
+
+		for i, ty := range t.ElemTypes {
+			oty := ot.ElemTypes[i]
+			if !ok {
+				return false
+			}
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write a tuple type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// a tuple type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "tuple"
+}
+
+func (t typeTuple) GoString() string {
+	if len(t.ElemTypes) == 0 {
+		return "cty.EmptyTuple"
+	}
+	return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
+}
+
+// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
+// the empty tuple type.
+var EmptyTuple Type
+
+// EmptyTupleVal is the only possible non-null, non-unknown value of type
+// EmptyTuple.
+var EmptyTupleVal Value
+
+func init() {
+	EmptyTuple = Tuple([]Type{})
+	EmptyTupleVal = Value{
+		ty: EmptyTuple,
+		v:  []interface{}{},
+	}
+}
+
+// IsTupleType returns true if the given type is a tuple type, regardless
+// of its element types.
+func (t Type) IsTupleType() bool { + _, ok := t.typeImpl.(typeTuple) + return ok +} + +// Length returns the number of elements of the receiving tuple type. +// Will panic if the reciever isn't a tuple type; use IsTupleType to determine +// whether this operation will succeed. +func (t Type) Length() int { + if ot, ok := t.typeImpl.(typeTuple); ok { + return len(ot.ElemTypes) + } + panic("Length on non-tuple Type") +} + +// TupleElementType returns the type of the element with the given index. Will +// panic if the receiver is not a tuple type (use IsTupleType to confirm) +// or if the index is out of range (use Length to confirm). +func (t Type) TupleElementType(idx int) Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes[idx] + } + panic("TupleElementType on non-tuple Type") +} + +// TupleElementTypes returns a slice of the recieving tuple type's element +// types. Will panic if the receiver is not a tuple type (use IsTupleType +// to confirm). +// +// The returned slice is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the +// underlying array. For many purposes the element-related methods of Value +// are more appropriate and more convenient to use. +func (t Type) TupleElementTypes() []Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes + } + panic("TupleElementTypes on non-tuple Type") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/type.go b/vendor/github.com/zclconf/go-cty/cty/type.go new file mode 100644 index 0000000..730cb98 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/type.go @@ -0,0 +1,120 @@ +package cty + +// Type represents value types within the type system. +// +// This is a closed interface type, meaning that only the concrete +// implementations provided within this package are considered valid. +type Type struct { + typeImpl +} + +type typeImpl interface { + // isTypeImpl is a do-nothing method that exists only to express + // that a type is an implementation of typeImpl. + isTypeImpl() typeImplSigil + + // Equals returns true if the other given Type exactly equals the + // receiver Type. + Equals(other Type) bool + + // FriendlyName returns a human-friendly *English* name for the given + // type. + FriendlyName(mode friendlyTypeNameMode) string + + // GoString implements the GoStringer interface from package fmt. + GoString() string +} + +// Base implementation of Type to embed into concrete implementations +// to signal that they are implementations of Type. +type typeImplSigil struct{} + +func (t typeImplSigil) isTypeImpl() typeImplSigil { + return typeImplSigil{} +} + +// Equals returns true if the other given Type exactly equals the receiver +// type. +func (t Type) Equals(other Type) bool { + return t.typeImpl.Equals(other) +} + +// FriendlyName returns a human-friendly *English* name for the given type. +func (t Type) FriendlyName() string { + return t.typeImpl.FriendlyName(friendlyTypeName) +} + +// FriendlyNameForConstraint is similar to FriendlyName except that the +// result is specialized for describing type _constraints_ rather than types +// themselves. This is more appropriate when reporting that a particular value +// does not conform to an expected type constraint. +// +// In particular, this function uses the term "any type" to refer to +// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName. 
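
A sketch tying the tuple accessors above to the two friendly-name variants just described; the tuple's element types are arbitrary examples:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        ty := cty.Tuple([]cty.Type{cty.String, cty.Number})
        if ty.IsTupleType() {
            fmt.Println(ty.Length())                           // 2
            fmt.Println(ty.TupleElementType(1).FriendlyName()) // number
        }

        // The two naming variants differ only for the dynamic pseudo-type.
        fmt.Println(cty.DynamicPseudoType.FriendlyName())              // dynamic
        fmt.Println(cty.DynamicPseudoType.FriendlyNameForConstraint()) // any type
    }
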
+func (t Type) FriendlyNameForConstraint() string { + return t.typeImpl.FriendlyName(friendlyTypeConstraintName) +} + +// friendlyNameMode is an internal combination of the various FriendlyName* +// variants that just directly takes a mode, for easy passthrough for +// recursive name construction. +func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string { + return t.typeImpl.FriendlyName(mode) +} + +// GoString returns a string approximating how the receiver type would be +// expressed in Go source code. +func (t Type) GoString() string { + if t.typeImpl == nil { + return "cty.NilType" + } + + return t.typeImpl.GoString() +} + +// NilType is an invalid type used when a function is returning an error +// and has no useful type to return. It should not be used and any methods +// called on it will panic. +var NilType = Type{} + +// HasDynamicTypes returns true either if the receiver is itself +// DynamicPseudoType or if it is a compound type whose descendent elements +// are DynamicPseudoType. +func (t Type) HasDynamicTypes() bool { + switch { + case t == DynamicPseudoType: + return true + case t.IsPrimitiveType(): + return false + case t.IsCollectionType(): + return false + case t.IsObjectType(): + attrTypes := t.AttributeTypes() + for _, at := range attrTypes { + if at.HasDynamicTypes() { + return true + } + } + return false + case t.IsTupleType(): + elemTypes := t.TupleElementTypes() + for _, et := range elemTypes { + if et.HasDynamicTypes() { + return true + } + } + return false + case t.IsCapsuleType(): + return false + default: + // Should never happen, since above should be exhaustive + panic("HasDynamicTypes does not support the given type") + } +} + +type friendlyTypeNameMode rune + +const ( + friendlyTypeName friendlyTypeNameMode = 'N' + friendlyTypeConstraintName friendlyTypeNameMode = 'C' +) diff --git a/vendor/github.com/zclconf/go-cty/cty/type_conform.go b/vendor/github.com/zclconf/go-cty/cty/type_conform.go new file mode 100644 index 0000000..b417dc7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/type_conform.go @@ -0,0 +1,142 @@ +package cty + +// TestConformance recursively walks the receiver and the given other type and +// returns nil if the receiver *conforms* to the given type. +// +// Type conformance is similar to type equality but has one crucial difference: +// PseudoTypeDynamic can be used within the given type to represent that +// *any* type is allowed. +// +// If any non-conformities are found, the returned slice will be non-nil and +// contain at least one error value. It will be nil if the type is entirely +// conformant. +// +// Note that the special behavior of PseudoTypeDynamic is the *only* exception +// to normal type equality. Calling applications may wish to apply their own +// automatic conversion logic to the given data structure to create a more +// liberal notion of conformance to a type. +// +// Returned errors are usually (but not always) PathError instances that +// indicate where in the structure the error was found. If a returned error +// is of that type then the error message is written for (English-speaking) +// end-users working within the cty type system, not mentioning any Go-oriented +// implementation details. +func (t Type) TestConformance(other Type) []error { + path := make(Path, 0) + var errs []error + testConformance(t, other, path, &errs) + return errs +} + +func testConformance(given Type, want Type, path Path, errs *[]error) { + if want.Equals(DynamicPseudoType) { + // anything goes! 
+ return + } + + if given.Equals(want) { + // Any equal types are always conformant + return + } + + // The remainder of this function is concerned with detecting + // and reporting the specific non-conformance, since we wouldn't + // have got here if the types were not divergent. + // We treat compound structures as special so that we can report + // specifically what is non-conforming, rather than simply returning + // the entire type names and letting the user puzzle it out. + + if given.IsObjectType() && want.IsObjectType() { + givenAttrs := given.AttributeTypes() + wantAttrs := want.AttributeTypes() + + if len(givenAttrs) != len(wantAttrs) { + // Something is missing from one of them. + for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) + } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) + } + } + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for k, wantAttrType := range wantAttrs { + if givenAttrType, exists := givenAttrs[k]; exists { + path[pathIdx] = GetAttrStep{Name: k} + testConformance(givenAttrType, wantAttrType, path, errs) + } + } + + path = path[0:pathIdx] + + return + } + + if given.IsTupleType() && want.IsTupleType() { + givenElems := given.TupleElementTypes() + wantElems := want.TupleElementTypes() + + if len(givenElems) != len(wantElems) { + *errs = append( + *errs, + errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)), + ) + return + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for i, wantElemType := range wantElems { + givenElemType := givenElems[i] + path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))} + testConformance(givenElemType, wantElemType, path, errs) + } + + path = path[0:pathIdx] + + return + } + + if given.IsListType() && want.IsListType() { + path = append(path, IndexStep{Key: UnknownVal(Number)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsMapType() && want.IsMapType() { + path = append(path, IndexStep{Key: UnknownVal(String)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsSetType() && want.IsSetType() { + path = append(path, IndexStep{Key: UnknownVal(given.ElementType())}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + *errs = append( + *errs, + errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()), + ) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go new file mode 100644 index 0000000..e1e220a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go @@ -0,0 +1,57 @@ +package cty + +import ( + "encoding/gob" + "fmt" + "math/big" + "strings" + + "github.com/zclconf/go-cty/cty/set" +) + +// InternalTypesToRegister is a slice of values that covers all of the +// internal types used in the representation of cty.Type and cty.Value +// across all cty Types. 
+// +// This is intended to be used to register these types with encoding +// packages that require registration of types used in interfaces, such as +// encoding/gob, thus allowing cty types and values to be included in streams +// created from those packages. However, registering with gob is not necessary +// since that is done automatically as a side-effect of importing this package. +// +// Callers should not do anything with the values here except pass them on +// verbatim to a registration function. +// +// If the calling application uses Capsule types that wrap local structs either +// directly or indirectly, these structs may also need to be registered in +// order to support encoding and decoding of values of these types. That is the +// responsibility of the calling application. +var InternalTypesToRegister []interface{} + +func init() { + InternalTypesToRegister = []interface{}{ + primitiveType{}, + typeList{}, + typeMap{}, + typeObject{}, + typeSet{}, + setRules{}, + set.Set{}, + typeTuple{}, + big.Float{}, + capsuleType{}, + []interface{}(nil), + map[string]interface{}(nil), + } + + // Register these with gob here, rather than in gob.go, to ensure + // that this will always happen after we build the above. + for _, tv := range InternalTypesToRegister { + typeName := fmt.Sprintf("%T", tv) + if strings.HasPrefix(typeName, "cty.") { + gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv) + } else { + gob.Register(tv) + } + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go new file mode 100644 index 0000000..e54179e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go @@ -0,0 +1,84 @@ +package cty + +// unknownType is the placeholder type used for the sigil value representing +// "Unknown", to make it unambigiously distinct from any other possible value. +type unknownType struct { +} + +// Unknown is a special value that can be +var unknown interface{} = &unknownType{} + +// UnknownVal returns an Value that represents an unknown value of the given +// type. Unknown values can be used to represent a value that is +// not yet known. Its meaning is undefined in cty, but it could be used by +// an calling application to allow partial evaluation. +// +// Unknown values of any type can be created of any type. All operations on +// Unknown values themselves return Unknown. +func UnknownVal(t Type) Value { + return Value{ + ty: t, + v: unknown, + } +} + +func (t unknownType) GoString() string { + // This is the stringification of our internal unknown marker. The + // stringification of the public representation of unknowns is in + // Value.GoString. + return "cty.unknown" +} + +type pseudoTypeDynamic struct { + typeImplSigil +} + +// DynamicPseudoType represents the dynamic pseudo-type. +// +// This type can represent situations where a type is not yet known. Its +// meaning is undefined in cty, but it could be used by a calling +// application to allow expression type checking with some types not yet known. +// For example, the application might optimistically permit any operation on +// values of this type in type checking, allowing a partial type-check result, +// and then repeat the check when more information is known to get the +// final, concrete type. +// +// It is a pseudo-type because it is used only as a sigil to the calling +// application. 
"Unknown" is the only valid value of this pseudo-type, so +// operations on values of this type will always short-circuit as per +// the rules for that special value. +var DynamicPseudoType Type + +func (t pseudoTypeDynamic) Equals(other Type) bool { + _, ok := other.typeImpl.(pseudoTypeDynamic) + return ok +} + +func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string { + switch mode { + case friendlyTypeConstraintName: + return "any type" + default: + return "dynamic" + } +} + +func (t pseudoTypeDynamic) GoString() string { + return "cty.DynamicPseudoType" +} + +// DynamicVal is the only valid value of the pseudo-type dynamic. +// This value can be used as a placeholder where a value or expression's +// type and value are both unknown, thus allowing partial evaluation. See +// the docs for DynamicPseudoType for more information. +var DynamicVal Value + +func init() { + DynamicPseudoType = Type{ + pseudoTypeDynamic{}, + } + DynamicVal = Value{ + ty: DynamicPseudoType, + v: unknown, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go b/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go new file mode 100644 index 0000000..cfd4a42 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go @@ -0,0 +1,64 @@ +package cty + +// UnknownAsNull returns a value of the same type as the given value but +// with any unknown values (including nested values) replaced with null +// values of the same type. +// +// This can be useful if a result is to be serialized in a format that can't +// represent unknowns, such as JSON, as long as the caller does not need to +// retain the unknown value information. +func UnknownAsNull(val Value) Value { + if !val.IsKnown() { + return NullVal(val.Type()) + } + + ty := val.Type() + switch { + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make([]Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, UnknownAsNull(v)) + } + switch { + case ty.IsListType(): + return ListVal(vals) + case ty.IsTupleType(): + return TupleVal(vals) + default: + return SetVal(vals) + } + case ty.IsMapType() || ty.IsObjectType(): + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make(map[string]Value, length) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + vals[k.AsString()] = UnknownAsNull(v) + } + switch { + case ty.IsMapType(): + return MapVal(vals) + default: + return ObjectVal(vals) + } + } + + return val +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value.go b/vendor/github.com/zclconf/go-cty/cty/value.go new file mode 100644 index 0000000..80cb8f7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value.go @@ -0,0 +1,98 @@ +package cty + +// Value represents a value of a particular type, and is the interface by +// which operations are executed on typed values. +// +// Value has two different classes of method. Operation methods stay entirely +// within the type system (methods accept and return Value instances) and +// are intended for use in implementing a language in terms of cty, while +// integration methods either enter or leave the type system, working with +// native Go values. 
Operation methods are guaranteed to support all of the +// expected short-circuit behavior for unknown and dynamic values, while +// integration methods may not. +// +// The philosophy for the operations API is that it's the caller's +// responsibility to ensure that the given types and values satisfy the +// specified invariants during a separate type check, so that the caller is +// able to return errors to its user from the application's own perspective. +// +// Consequently the design of these methods assumes such checks have already +// been done and panics if any invariants turn out not to be satisfied. These +// panic errors are not intended to be handled, but rather indicate a bug in +// the calling application that should be fixed with more checks prior to +// executing operations. +// +// A related consequence of this philosophy is that no automatic type +// conversions are done. If a method specifies that its argument must be +// number then it's the caller's responsibility to do that conversion before +// the call, thus allowing the application to have more constrained conversion +// rules than are offered by the built-in converter where necessary. +type Value struct { + ty Type + v interface{} +} + +// Type returns the type of the value. +func (val Value) Type() Type { + return val.ty +} + +// IsKnown returns true if the value is known. That is, if it is not +// the result of the unknown value constructor Unknown(...), and is not +// the result of an operation on another unknown value. +// +// Unknown values are only produced either directly or as a result of +// operating on other unknown values, and so an application that never +// introduces Unknown values can be guaranteed to never receive any either. +func (val Value) IsKnown() bool { + return val.v != unknown +} + +// IsNull returns true if the value is null. Values of any type can be +// null, but any operations on a null value will panic. No operation ever +// produces null, so an application that never introduces Null values can +// be guaranteed to never receive any either. +func (val Value) IsNull() bool { + return val.v == nil +} + +// NilVal is an invalid Value that can be used as a placeholder when returning +// with an error from a function that returns (Value, error). +// +// NilVal is *not* a valid error and so no operations may be performed on it. +// Any attempt to use it will result in a panic. +// +// This should not be confused with the idea of a Null value, as returned by +// NullVal. NilVal is a nil within the *Go* type system, and is invalid in +// the cty type system. Null values *do* exist in the cty type system. +var NilVal = Value{ + ty: Type{typeImpl: nil}, + v: nil, +} + +// IsWhollyKnown is an extension of IsKnown that also recursively checks +// inside collections and structures to see if there are any nested unknown +// values. 
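
A sketch of the known/null distinctions documented in this file, combined with UnknownAsNull from the previous file; the list contents are hypothetical:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        v := cty.ListVal([]cty.Value{
            cty.StringVal("a"),
            cty.UnknownVal(cty.String),
        })

        fmt.Println(v.IsKnown())       // true: the list itself is known
        fmt.Println(v.IsWhollyKnown()) // false: one element is unknown

        // UnknownAsNull swaps nested unknowns for nulls, e.g. before
        // serializing to a format that cannot represent unknowns.
        fmt.Println(cty.UnknownAsNull(v).IsWhollyKnown()) // true
    }
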
+func (val Value) IsWhollyKnown() bool { + if !val.IsKnown() { + return false + } + + if val.IsNull() { + // Can't recurse into a null, so we're done + return true + } + + switch { + case val.CanIterateElements(): + for it := val.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsWhollyKnown() { + return false + } + } + return true + default: + return true + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go new file mode 100644 index 0000000..495a83e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go @@ -0,0 +1,276 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "golang.org/x/text/unicode/norm" + + "github.com/zclconf/go-cty/cty/set" +) + +// BoolVal returns a Value of type Number whose internal value is the given +// bool. +func BoolVal(v bool) Value { + return Value{ + ty: Bool, + v: v, + } +} + +// NumberVal returns a Value of type Number whose internal value is the given +// big.Float. The returned value becomes the owner of the big.Float object, +// and so it's forbidden for the caller to mutate the object after it's +// wrapped in this way. +func NumberVal(v *big.Float) Value { + return Value{ + ty: Number, + v: v, + } +} + +// NumberIntVal returns a Value of type Number whose internal value is equal +// to the given integer. +func NumberIntVal(v int64) Value { + return NumberVal(new(big.Float).SetInt64(v)) +} + +// NumberUIntVal returns a Value of type Number whose internal value is equal +// to the given unsigned integer. +func NumberUIntVal(v uint64) Value { + return NumberVal(new(big.Float).SetUint64(v)) +} + +// NumberFloatVal returns a Value of type Number whose internal value is +// equal to the given float. +func NumberFloatVal(v float64) Value { + return NumberVal(new(big.Float).SetFloat64(v)) +} + +// StringVal returns a Value of type String whose internal value is the +// given string. +// +// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and +// they are NFC-normalized on entry into the world of cty values. +// +// If the given string is not valid UTF-8 then behavior of string operations +// is undefined. +func StringVal(v string) Value { + return Value{ + ty: String, + v: NormalizeString(v), + } +} + +// NormalizeString applies the same normalization that cty applies when +// constructing string values. +// +// A return value from this function can be meaningfully compared byte-for-byte +// with a Value.AsString result. +func NormalizeString(s string) string { + return norm.NFC.String(s) +} + +// ObjectVal returns a Value of an object type whose structure is defined +// by the key names and value types in the given map. +func ObjectVal(attrs map[string]Value) Value { + attrTypes := make(map[string]Type, len(attrs)) + attrVals := make(map[string]interface{}, len(attrs)) + + for attr, val := range attrs { + attr = NormalizeString(attr) + attrTypes[attr] = val.ty + attrVals[attr] = val.v + } + + return Value{ + ty: Object(attrTypes), + v: attrVals, + } +} + +// TupleVal returns a Value of a tuple type whose element types are +// defined by the value types in the given slice. 
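
A sketch of the constructors above, including the NFC normalization that StringVal applies on entry; the object's attributes are hypothetical:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        person := cty.ObjectVal(map[string]cty.Value{
            "name": cty.StringVal("Ada"), // hypothetical attributes
            "age":  cty.NumberIntVal(36),
        })
        fmt.Println(person.GetAttr("name").AsString()) // Ada

        // Two spellings of the same text normalize to the same value.
        composed := cty.StringVal("café")         // precomposed é
        decomposed := cty.StringVal("cafe\u0301") // e + combining accent
        fmt.Println(composed.Equals(decomposed).True()) // true
    }
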
+func TupleVal(elems []Value) Value { + elemTypes := make([]Type, len(elems)) + elemVals := make([]interface{}, len(elems)) + + for i, val := range elems { + elemTypes[i] = val.ty + elemVals[i] = val.v + } + + return Value{ + ty: Tuple(elemTypes), + v: elemVals, + } +} + +// ListVal returns a Value of list type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also ListValEmpty.) +func ListVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call ListVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent list element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + return Value{ + ty: List(elementType), + v: rawList, + } +} + +// ListValEmpty returns an empty list of the given element type. +func ListValEmpty(element Type) Value { + return Value{ + ty: List(element), + v: []interface{}{}, + } +} + +// MapVal returns a Value of a map type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given map is empty, since then the element type cannot be inferred. +// (See also MapValEmpty.) +func MapVal(vals map[string]Value) Value { + if len(vals) == 0 { + panic("must not call MapVal with empty map") + } + elementType := DynamicPseudoType + rawMap := make(map[string]interface{}, len(vals)) + + for key, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent map element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawMap[NormalizeString(key)] = val.v + } + + return Value{ + ty: Map(elementType), + v: rawMap, + } +} + +// MapValEmpty returns an empty map of the given element type. +func MapValEmpty(element Type) Value { + return Value{ + ty: Map(element), + v: map[string]interface{}{}, + } +} + +// SetVal returns a Value of set type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also SetValEmpty.) 
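The homogeneity rule described above for SetVal applies equally to ListVal and MapVal: the element type is inferred from the first element, and a later element of a different type causes a panic. A short sketch, assuming the cty package as vendored here:

package main

import "github.com/zclconf/go-cty/cty"

func main() {
	// OK: all elements are strings, so the result has type cty.List(cty.String).
	_ = cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.StringVal("b"),
	})

	// OK: an empty list cannot infer its element type, so it must be stated.
	_ = cty.ListValEmpty(cty.Number)

	// Panics: cty.String followed by cty.Number is an inconsistent element type.
	defer func() { recover() }()
	_ = cty.ListVal([]cty.Value{
		cty.StringVal("a"),
		cty.NumberIntVal(1),
	})
}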
+func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + } +} + +// SetValFromValueSet returns a Value of set type based on an already-constructed +// ValueSet. +// +// The element type of the returned value is the element type of the given +// set. +func SetValFromValueSet(s ValueSet) Value { + ety := s.ElementType() + rawVal := s.s.Copy() // copy so caller can't mutate what we wrap + + return Value{ + ty: Set(ety), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer. +func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go new file mode 100644 index 0000000..2089fb3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -0,0 +1,1075 @@ +package cty + +import ( + "fmt" + "math/big" + + "reflect" + + "github.com/zclconf/go-cty/cty/set" +) + +func (val Value) GoString() string { + if val == NilVal { + return "cty.NilVal" + } + + if val.ty == DynamicPseudoType { + return "cty.DynamicVal" + } + + if !val.IsKnown() { + return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + } + if val.IsNull() { + return fmt.Sprintf("cty.NullVal(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } else { + return "cty.False" + } + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. 
+ if fv.IsInt() { + return fmt.Sprintf("cty.NumberIntVal(%#v)", fv) + } + if rfv, accuracy := fv.Float64(); accuracy == big.Exact { + return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv) + } + return fmt.Sprintf("cty.NumberVal(new(big.Float).Parse(\"%#v\", 10))", fv) + case String: + return fmt.Sprintf("cty.StringVal(%#v)", val.v) + } + + switch { + case val.ty.IsSetType(): + vals := val.v.(set.Set).Values() + if vals == nil || len(vals) == 0 { + return fmt.Sprintf("cty.SetValEmpty()") + } else { + return fmt.Sprintf("cty.SetVal(%#v)", vals) + } + case val.ty.IsCapsuleType(): + return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v) + } + + // Default exposes implementation details, so should actually cover + // all of the cases above for good caller UX. + return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v) +} + +// Equals returns True if the receiver and the given other value have the +// same type and are exactly equal in value. +// +// The usual short-circuit rules apply, so the result can be unknown or typed +// as dynamic if either of the given values are. Use RawEquals to compare +// if two values are equal *ignoring* the short-circuit rules. +func (val Value) Equals(other Value) Value { + if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() { + return UnknownVal(Bool) + } + + if !val.ty.Equals(other.ty) { + return BoolVal(false) + } + + if !(val.IsKnown() && other.IsKnown()) { + return UnknownVal(Bool) + } + + if val.IsNull() || other.IsNull() { + if val.IsNull() && other.IsNull() { + return BoolVal(true) + } + return BoolVal(false) + } + + ty := val.ty + result := false + + switch { + case ty == Number: + result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0 + case ty == Bool: + result = val.v.(bool) == other.v.(bool) + case ty == String: + // Simple equality is safe because we NFC-normalize strings as they + // enter our world from StringVal, and so we can assume strings are + // always in normal form. + result = val.v.(string) == other.v.(string) + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + result = true + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + result = true + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + result = true + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + equal := true + + // Note that by our definition of sets it's never possible for two + // sets that contain unknown values (directly or indicrectly) to + // ever be equal, even if they are otherwise identical. 
+ + // FIXME: iterating both lists and checking each item is not the + // ideal implementation here, but it works with the primitives we + // have in the set implementation. Perhaps the set implementation + // can provide its own equality test later. + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + equal = false + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + equal = false + } + }) + + result = equal + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + result = true + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + result = false + break + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return BoolVal(val.v == other.v) + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in Equals", ty)) + } + + return BoolVal(result) +} + +// NotEqual is a shorthand for Equals followed by Not. +func (val Value) NotEqual(other Value) Value { + return val.Equals(other).Not() +} + +// True returns true if the receiver is True, false if False, and panics if +// the receiver is not of type Bool. +// +// This is a helper function to help write application logic that works with +// values, rather than a first-class operation. It does not work with unknown +// or null values. For more robust handling with unknown value +// short-circuiting, use val.Equals(cty.True). +func (val Value) True() bool { + if val.ty != Bool { + panic("not bool") + } + return val.Equals(True).v.(bool) +} + +// False is the opposite of True. +func (val Value) False() bool { + return !val.True() +} + +// RawEquals returns true if and only if the two given values have the same +// type and equal value, ignoring the usual short-circuit rules about +// unknowns and dynamic types. +// +// This method is more appropriate for testing than for real use, since it +// skips over usual semantics around unknowns but as a consequence allows +// testing the result of another operation that is expected to return unknown. +// It returns a primitive Go bool rather than a Value to remind us that it +// is not a first-class value operation. 
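A short sketch contrasting Equals (an operation method, with unknown-value short-circuiting) and RawEquals (a test-oriented integration method), assuming the cty package as vendored here:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	a := cty.UnknownVal(cty.Number)
	b := cty.NumberIntVal(5)

	eq := a.Equals(b)         // an unknown operand makes the result unknown too
	fmt.Println(eq.IsKnown()) // false

	fmt.Println(a.RawEquals(b))                          // false: one known, one unknown
	fmt.Println(a.RawEquals(cty.UnknownVal(cty.Number))) // true: same type, both unknown
}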
+func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) + return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. +func (val Value) Add(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. 
Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. +func (val Value) Divide(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Quo(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Modulo returns the remainder of an integer division of the receiver and +// the given other value. Both values must be numbers; this method will panic +// if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// This operation is primarily here for use with nonzero natural numbers. +// Modulo with "other" as a non-natural number gets somewhat philosophical, +// and this function takes a position on what that should mean, but callers +// may wish to disallow such things outright or implement their own modulo +// if they disagree with the interpretation used here. +func (val Value) Modulo(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + // We cheat a bit here with infinities, just abusing the Multiply operation + // to get an infinite result of the correct sign. 
+ if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity { + return val.Multiply(other) + } + + if other.RawEquals(Zero) { + return val + } + + // FIXME: This is a bit clumsy. Should come back later and see if there's a + // more straightforward way to do this. + rat := val.Divide(other) + ratFloorInt := &big.Int{} + rat.v.(*big.Float).Int(ratFloorInt) + work := (&big.Float{}).SetInt(ratFloorInt) + work.Mul(other.v.(*big.Float), work) + work.Sub(val.v.(*big.Float), work) + + return NumberVal(work) +} + +// Absolute returns the absolute (signless) value of the receiver, which must +// be a number or this method will panic. +func (val Value) Absolute() Value { + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := (&big.Float{}).Abs(val.v.(*big.Float)) + return NumberVal(ret) +} + +// GetAttr returns the value of the given attribute of the receiver, which +// must be of an object type that has an attribute of the given name. +// This method will panic if the receiver type is not compatible. +// +// The method will also panic if the given attribute name is not defined +// for the value's type. Use the attribute-related methods on Type to +// check for the validity of an attribute before trying to use it. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be DynamicVal. +func (val Value) GetAttr(name string) Value { + if val.ty == DynamicPseudoType { + return DynamicVal + } + + if !val.ty.IsObjectType() { + panic("value is not an object") + } + + name = NormalizeString(name) + if !val.ty.HasAttribute(name) { + panic("value has no attribute of that name") + } + + attrType := val.ty.AttributeType(name) + + if !val.IsKnown() { + return UnknownVal(attrType) + } + + return Value{ + ty: attrType, + v: val.v.(map[string]interface{})[name], + } +} + +// Index returns the value of an element of the receiver, which must have +// either a list, map or tuple type. This method will panic if the receiver +// type is not compatible. +// +// The key value must be the correct type for the receving collection: a +// number if the collection is a list or tuple, or a string if it is a map. +// In the case of a list or tuple, the given number must be convertable to int +// or this method will panic. The key may alternatively be of +// DynamicPseudoType, in which case the result itself is an unknown of the +// collection's element type. +// +// The result is of the receiver collection's element type, or in the case +// of a tuple the type of the specific element index requested. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be the DynamicValue. 
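Before moving on to the indexing operations below, a brief sketch of the numeric operation methods documented above, including the divide-by-zero and unknown-value behavior (assuming the cty package as vendored here):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	a := cty.NumberIntVal(10)
	b := cty.NumberIntVal(3)

	fmt.Println(a.Add(b).AsBigFloat())      // 13
	fmt.Println(a.Subtract(b).AsBigFloat()) // 7
	fmt.Println(a.Modulo(b).AsBigFloat())   // 1

	// Dividing a nonzero number by zero yields an infinity rather than panicking.
	fmt.Println(a.Divide(cty.Zero).RawEquals(cty.PositiveInfinity)) // true

	// Any unknown operand short-circuits the result to an unknown Number.
	fmt.Println(a.Add(cty.UnknownVal(cty.Number)).IsKnown()) // false
}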
+func (val Value) Index(key Value) Value { + if val.ty == DynamicPseudoType { + return DynamicVal + } + + switch { + case val.Type().IsListType(): + elty := val.Type().ElementType() + if key.Type() == DynamicPseudoType { + return UnknownVal(elty) + } + + if key.Type() != Number { + panic("element key for list must be number") + } + if !key.IsKnown() { + return UnknownVal(elty) + } + + if !val.IsKnown() { + return UnknownVal(elty) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + panic("element key for list must be non-negative integer") + } + + return Value{ + ty: elty, + v: val.v.([]interface{})[index], + } + case val.Type().IsMapType(): + elty := val.Type().ElementType() + if key.Type() == DynamicPseudoType { + return UnknownVal(elty) + } + + if key.Type() != String { + panic("element key for map must be string") + } + if !key.IsKnown() { + return UnknownVal(elty) + } + + if !val.IsKnown() { + return UnknownVal(elty) + } + + keyStr := key.v.(string) + + return Value{ + ty: elty, + v: val.v.(map[string]interface{})[keyStr], + } + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return DynamicVal + } + + if key.Type() != Number { + panic("element key for tuple must be number") + } + if !key.IsKnown() { + return DynamicVal + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + panic("element key for list must be non-negative integer") + } + + eltys := val.Type().TupleElementTypes() + + if !val.IsKnown() { + return UnknownVal(eltys[index]) + } + + return Value{ + ty: eltys[index], + v: val.v.([]interface{})[index], + } + default: + panic("not a list, map, or tuple type") + } +} + +// HasIndex returns True if the receiver (which must be supported for Index) +// has an element with the given index key, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the collection or the +// key value are unknown. +// +// This method will panic if the receiver is not indexable, but does not +// impose any panic-causing type constraints on the key. 
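A sketch of Index and HasIndex on a list and a map, as documented above (an editorial example assuming the cty package as vendored here). Note that HasIndex returns a cty Bool value, not a native bool:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	fruits := cty.ListVal([]cty.Value{
		cty.StringVal("apple"),
		cty.StringVal("banana"),
	})
	ports := cty.MapVal(map[string]cty.Value{
		"http": cty.NumberIntVal(80),
	})

	fmt.Println(fruits.Index(cty.NumberIntVal(1)).AsString())    // banana
	fmt.Printf("%#v\n", fruits.HasIndex(cty.NumberIntVal(5)))    // cty.False
	fmt.Println(ports.Index(cty.StringVal("http")).AsBigFloat()) // 80
	fmt.Printf("%#v\n", ports.HasIndex(cty.StringVal("https")))  // cty.False
}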
+func (val Value) HasIndex(key Value) Value { + if val.ty == DynamicPseudoType { + return UnknownVal(Bool) + } + + switch { + case val.Type().IsListType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) + case val.Type().IsMapType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != String { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + keyStr := key.v.(string) + _, exists := val.v.(map[string]interface{})[keyStr] + + return BoolVal(exists) + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + length := val.Type().Length() + return BoolVal(int(index) < length && index >= 0) + default: + panic("not a list, map, or tuple type") + } +} + +// HasElement returns True if the receiver (which must be of a set type) +// has the given value as an element, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the set or the +// given value are unknown. +// +// This method will panic if the receiver is not a set, or if it is a null set. +func (val Value) HasElement(elem Value) Value { + ty := val.Type() + + if !ty.IsSetType() { + panic("not a set type") + } + if !val.IsKnown() || !elem.IsKnown() { + return UnknownVal(Bool) + } + if val.IsNull() { + panic("can't call HasElement on a nil value") + } + if ty.ElementType() != elem.Type() { + return False + } + + s := val.v.(set.Set) + return BoolVal(s.Has(elem.v)) +} + +// Length returns the length of the receiver, which must be a collection type +// or tuple type, as a number value. If the receiver is not a compatible type +// then this method will panic. +// +// If the receiver is unknown then the result is also unknown. +// +// If the receiver is null then this function will panic. +// +// Note that Length is not supported for strings. To determine the length +// of a string, call AsString and take the length of the native Go string +// that is returned. +func (val Value) Length() Value { + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return NumberIntVal(int64(val.Type().Length())) + } + + if !val.IsKnown() { + return UnknownVal(Number) + } + + return NumberIntVal(int64(val.LengthInt())) +} + +// LengthInt is like Length except it returns an int. It has the same behavior +// as Length except that it will panic if the receiver is unknown. +// +// This is an integration method provided for the convenience of code bridging +// into Go's type system. +func (val Value) LengthInt() int { + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return val.Type().Length() + } + if val.Type().IsObjectType() { + // For objects, the length is the number of attributes associated with the type. 
+ return len(val.Type().AttributeTypes()) + } + if !val.IsKnown() { + panic("value is not known") + } + if val.IsNull() { + panic("value is null") + } + + switch { + + case val.ty.IsListType(): + return len(val.v.([]interface{})) + + case val.ty.IsSetType(): + return val.v.(set.Set).Length() + + case val.ty.IsMapType(): + return len(val.v.(map[string]interface{})) + + default: + panic("value is not a collection") + } +} + +// ElementIterator returns an ElementIterator for iterating the elements +// of the receiver, which must be a collection type, a tuple type, or an object +// type. If called on a method of any other type, this method will panic. +// +// The value must be Known and non-Null, or this method will panic. +// +// If the receiver is of a list type, the returned keys will be of type Number +// and the values will be of the list's element type. +// +// If the receiver is of a map type, the returned keys will be of type String +// and the value will be of the map's element type. Elements are passed in +// ascending lexicographical order by key. +// +// If the receiver is of a set type, each element is returned as both the +// key and the value, since set members are their own identity. +// +// If the receiver is of a tuple type, the returned keys will be of type Number +// and the value will be of the corresponding element's type. +// +// If the receiver is of an object type, the returned keys will be of type +// String and the value will be of the corresponding attributes's type. +// +// ElementIterator is an integration method, so it cannot handle Unknown +// values. This method will panic if the receiver is Unknown. +func (val Value) ElementIterator() ElementIterator { + if !val.IsKnown() { + panic("can't use ElementIterator on unknown value") + } + if val.IsNull() { + panic("can't use ElementIterator on null value") + } + return elementIterator(val) +} + +// CanIterateElements returns true if the receiver can support the +// ElementIterator method (and by extension, ForEachElement) without panic. +func (val Value) CanIterateElements() bool { + return canElementIterator(val) +} + +// ForEachElement executes a given callback function for each element of +// the receiver, which must be a collection type or tuple type, or this method +// will panic. +// +// ForEachElement uses ElementIterator internally, and so the values passed +// to the callback are as described for ElementIterator. +// +// Returns true if the iteration exited early due to the callback function +// returning true, or false if the loop ran to completion. +// +// ForEachElement is an integration method, so it cannot handle Unknown +// values. This method will panic if the receiver is Unknown. +func (val Value) ForEachElement(cb ElementCallback) bool { + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + stop := cb(key, val) + if stop { + return true + } + } + return false +} + +// Not returns the logical inverse of the receiver, which must be of type +// Bool or this method will panic. +func (val Value) Not() Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(!val.v.(bool)) +} + +// And returns the result of logical AND with the receiver and the other given +// value, which must both be of type Bool or this method will panic. 
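Before the boolean operators below, a short sketch of the length and iteration helpers described above, assuming the cty package as vendored here. Map elements are visited in ascending lexicographical order by key:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	m := cty.MapVal(map[string]cty.Value{
		"a": cty.NumberIntVal(1),
		"b": cty.NumberIntVal(2),
	})

	fmt.Println(m.LengthInt()) // 2

	m.ForEachElement(func(key, val cty.Value) (stop bool) {
		fmt.Printf("%s => %v\n", key.AsString(), val.AsBigFloat())
		return false // keep iterating
	})
}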
+func (val Value) And(other Value) Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) && other.v.(bool)) +} + +// Or returns the result of logical OR with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) Or(other Value) Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) || other.v.(bool)) +} + +// LessThan returns True if the receiver is less than the other given value, +// which must both be numbers or this method will panic. +func (val Value) LessThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. +func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. +func (val Value) AsString() string { + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "gocty" package. +func (val Value) AsBigFloat() *big.Float { + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown +// value of any type that CanIterateElements, or panics if called on +// any other value. +// +// For more convenient conversions to slices of more specific types, use +// the "gocty" package. +func (val Value) AsValueSlice() []Value { + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret = append(ret, v) + } + return ret +} + +// AsValueMap returns a map[string]cty.Value representation of a non-null, +// non-unknown value of any type that CanIterateElements, or panics if called +// on any other value. 
+// +// For more convenient conversions to maps of more specific types, use +// the "gocty" package. +func (val Value) AsValueMap() map[string]Value { + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make(map[string]Value, l) + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + ret[k.AsString()] = v + } + return ret +} + +// AsValueSet returns a ValueSet representation of a non-null, +// non-unknown value of any collection type, or panics if called +// on any other value. +// +// Unlike AsValueSlice and AsValueMap, this method requires specifically a +// collection type (list, set or map) and does not allow structural types +// (tuple or object), because the ValueSet type requires homogenous +// element types. +// +// The returned ValueSet can store only values of the receiver's element type. +func (val Value) AsValueSet() ValueSet { + if !val.Type().IsCollectionType() { + panic("not a collection type") + } + + // We don't give the caller our own set.Set (assuming we're a cty.Set value) + // because then the caller could mutate our internals, which is forbidden. + // Instead, we will construct a new set and append our elements into it. + ret := NewValueSet(val.Type().ElementType()) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret.Add(v) + } + return ret +} + +// EncapsulatedValue returns the native value encapsulated in a non-null, +// non-unknown capsule-typed value, or panics if called on any other value. +// +// The result is the same pointer that was passed to CapsuleVal to create +// the value. Since cty considers values to be immutable, it is strongly +// recommended to treat the encapsulated value itself as immutable too. +func (val Value) EncapsulatedValue() interface{} { + if !val.Type().IsCapsuleType() { + panic("not a capsule-typed value") + } + + return val.v +} diff --git a/vendor/github.com/zclconf/go-cty/cty/walk.go b/vendor/github.com/zclconf/go-cty/cty/walk.go new file mode 100644 index 0000000..a6943ba --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/walk.go @@ -0,0 +1,182 @@ +package cty + +// Walk visits all of the values in a possibly-complex structure, calling +// a given function for each value. +// +// For example, given a list of strings the callback would first be called +// with the whole list and then called once for each element of the list. +// +// The callback function may prevent recursive visits to child values by +// returning false. The callback function my halt the walk altogether by +// returning a non-nil error. If the returned error is about the element +// currently being visited, it is recommended to use the provided path +// value to produce a PathError describing that context. +// +// The path passed to the given function may not be used after that function +// returns, since its backing array is re-used for other calls. 
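Before the implementation below, a minimal sketch of how a caller might use Walk as described above: the callback sees each container before its children and can prune recursion by returning false (an editorial example assuming the cty package as vendored here).

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	v := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
		"tags": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
	})

	_ = cty.Walk(v, func(p cty.Path, v cty.Value) (bool, error) {
		// The path length tells us how deep we are; the root has an empty path.
		fmt.Printf("%d step(s) deep: %#v\n", len(p), v.Type())
		return true, nil // true means "recurse into this value's children"
	})
}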
+func Walk(val Value, cb func(Path, Value) (bool, error)) error {
+	var path Path
+	return walk(path, val, cb)
+}
+
+func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
+	deeper, err := cb(path, val)
+	if err != nil {
+		return err
+	}
+	if !deeper {
+		return nil
+	}
+
+	if val.IsNull() || !val.IsKnown() {
+		// Can't recurse into null or unknown values, regardless of type
+		return nil
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			nameVal, av := it.Element()
+			path := append(path, GetAttrStep{
+				Name: nameVal.AsString(),
+			})
+			err := walk(path, av, cb)
+			if err != nil {
+				return err
+			}
+		}
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			path := append(path, IndexStep{
+				Key: kv,
+			})
+			err := walk(path, ev, cb)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Transform visits all of the values in a possibly-complex structure,
+// calling a given function for each value which has an opportunity to
+// replace that value.
+//
+// Unlike Walk, Transform visits child nodes first, so for a list of strings
+// it would first visit the strings and then the _new_ list constructed
+// from the transformed values of the list items.
+//
+// This is useful for creating the effect of being able to make deep mutations
+// to a value even though values are immutable. However, it's the responsibility
+// of the given function to preserve expected invariants, such as homogeneity of
+// element types in collections; this function can panic if such invariants
+// are violated, just as if new values were constructed directly using the
+// value constructor functions. An easy way to preserve invariants is to
+// ensure that the transform function never changes the value type.
+//
+// The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
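A companion sketch for Transform as documented above: children are rewritten first, and the callback here preserves each value's type by only replacing strings (an editorial example assuming the cty package as vendored here).

package main

import (
	"strings"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	v := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})

	upper, err := cty.Transform(v, func(p cty.Path, v cty.Value) (cty.Value, error) {
		if v.Type() == cty.String {
			return cty.StringVal(strings.ToUpper(v.AsString())), nil
		}
		return v, nil // leave non-strings (including the rebuilt list) unchanged
	})
	if err != nil {
		panic(err) // a non-nil error from the callback would surface here
	}
	_ = upper // equivalent to cty.ListVal of "A" and "B"
}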
+func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 0000000..2b00ddb --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 0000000..1fbd3e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 0000000..58ea875 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,289 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. 
+// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
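A brief usage sketch of the package-level API documented above: a one-shot digest via Sum256 and a keyed MAC via New512 (an editorial example assuming the blake2b package as vendored in this diff).

package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	data := []byte("hello world")

	// One-shot 256-bit digest.
	sum := blake2b.Sum256(data)
	fmt.Printf("%x\n", sum)

	// Keyed hashing: a non-nil key of up to 64 bytes turns BLAKE2b into a MAC.
	mac, err := blake2b.New512([]byte("my-secret-key"))
	if err != nil {
		panic(err) // only returned for an invalid key size
	}
	mac.Write(data)
	fmt.Printf("%x\n", mac.Sum(nil))
}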
+func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) 
+} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 0000000..4d31dd0 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 0000000..5593b1b --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,750 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define 
VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; 
\ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + MOVQ SP, R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ CX, 16(SP) + XORQ CX, CX + MOVQ CX, 24(SP) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(SP) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(SP) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(SP) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(SP), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(SP) + VMOVDQA Y13, 64(SP) + VMOVDQA Y14, 96(SP) + VMOVDQA Y15, 128(SP) + ROUND_AVX2(Y12, Y13, Y14, Y15, 
Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(SP) + VMOVDQA Y13, 192(SP) + VMOVDQA Y14, 224(SP) + VMOVDQA Y15, 256(SP) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) + ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + MOVQ DX, SP + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, 
v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 
6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(SP), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(SP) + VMOVDQA X13, 32(SP) + VMOVDQA X14, 48(SP) + VMOVDQA X15, 64(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(SP) + VMOVDQA X13, 96(SP) + VMOVDQA X14, 112(SP) + VMOVDQA X15, 128(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(SP) + VMOVDQA X13, 160(SP) + VMOVDQA X14, 176(SP) + VMOVDQA X15, 192(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(SP) + VMOVDQA X13, 224(SP) + VMOVDQA X14, 240(SP) + VMOVDQA X15, 256(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + 
SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + MOVQ BP, SP + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 0000000..30e2fcd --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7,amd64,!gccgo,!appengine + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 0000000..578e947 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,281 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
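blake2b_amd64.go above is the pre-Go 1.7 companion to blake2bAVX2_amd64.go: its !go1.7,amd64,!gccgo,!appengine constraint is the complement of the AVX2 shim's go1.7,amd64,!gccgo,!appengine, and blake2b_ref.go (further below) catches !amd64, gccgo, and appengine builds, so exactly one definition of hashBlocks ever makes it into a build. A minimal sketch of that mutually exclusive build-tag arrangement, using made-up file and function names:

// File fastpath_go17.go: compiled only when both constraints hold
// (Go >= 1.7 AND amd64). The blank line after the +build comment is
// required by this older constraint syntax.

// +build go1.7,amd64

package pick

func impl() string { return "fast path" }

// File fallback.go: the logical complement, compiled when Go < 1.7 OR the
// target is not amd64, so the package always has exactly one impl.

// +build !go1.7 !amd64

package pick

func impl() string { return "generic path" }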
+ +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(SP), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(SP) + MOVO X9, 32(SP) + MOVO X10, 48(SP) + MOVO X11, 64(SP) + 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(SP) + MOVO X9, 96(SP) + MOVO X10, 112(SP) + MOVO X11, 128(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(SP) + MOVO X9, 160(SP) + MOVO X10, 176(SP) + MOVO X11, 192(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(SP) + MOVO X9, 224(SP) + MOVO X10, 240(SP) + MOVO X11, 256(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, 
X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + MOVQ BP, SP + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 0000000..4bd2abc --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import "encoding/binary" + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
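For reference, each precomputed row below is a reshuffled row of the standard BLAKE2b sigma schedule (RFC 7693): the first message index of each of the four column G calls, then their second indices, then the same two groups for the diagonal calls, which is the order hashBlocksGeneric consumes them in. A short sketch that reproduces the twelve rows from sigma (the sigma table and the precomputedRow helper are our illustration, not part of this file):

package main

import "fmt"

// sigma is the standard BLAKE2b message schedule (RFC 7693, Table 2).
var sigma = [10][16]byte{
	{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
	{14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3},
	{11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4},
	{7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8},
	{9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13},
	{2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9},
	{12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11},
	{13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10},
	{6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5},
	{10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0},
}

// precomputedRow reorders one sigma row: the even-indexed entries of the
// first half, then the odd-indexed ones, then the same split for the
// second half.
func precomputedRow(s [16]byte) (p [16]byte) {
	order := [16]int{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}
	for i, j := range order {
		p[i] = s[j]
	}
	return p
}

func main() {
	// Rounds 10 and 11 reuse sigma rows 0 and 1, giving the 12 rows that the
	// precomputed table lists (its last two entries are marked as repeats).
	for r := 0; r < 12; r++ {
		fmt.Println(precomputedRow(sigma[r%10]))
	}
}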
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-32) | v12>>32 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-24) | v4>>24 + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-32) | v13>>32 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-24) | v5>>24 + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-32) | v14>>32 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-24) | v6>>24 + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-32) | v15>>32 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-24) | v7>>24 + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = v12<<(64-16) | v12>>16 + v8 += v12 + v4 ^= v8 + v4 = v4<<(64-63) | v4>>63 + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = v13<<(64-16) | v13>>16 + v9 += v13 + v5 ^= v9 + v5 = v5<<(64-63) | v5>>63 + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = v14<<(64-16) | v14>>16 + v10 += v14 + v6 ^= v10 + v6 = v6<<(64-63) | v6>>63 + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = v15<<(64-16) | v15>>16 + v11 += v15 + v7 ^= v11 + v7 = v7<<(64-63) | v7>>63 + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-32) | v15>>32 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-24) | v5>>24 + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-32) | v12>>32 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-24) | v6>>24 + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-32) | v13>>32 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-24) | v7>>24 + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-32) | v14>>32 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-24) | v4>>24 + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = v15<<(64-16) | v15>>16 + v10 += v15 + v5 ^= v10 + v5 = v5<<(64-63) | v5>>63 + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = v12<<(64-16) | v12>>16 + v11 += v12 + v6 ^= v11 + v6 = v6<<(64-63) | v6>>63 + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = v13<<(64-16) | v13>>16 + v8 += v13 + v7 ^= v8 + v7 = v7<<(64-63) | v7>>63 + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = v14<<(64-16) | v14>>16 + v9 += v14 + v4 ^= v9 + v4 = v4<<(64-63) | v4>>63 + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git 
a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 0000000..da156a1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 0000000..52c414d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produce a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length. 
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 0000000..efd689a --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. 
+# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 0000000..a3c021d --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000..d20f52b --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 0000000..d88bd1d --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. 
+type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000..0f35592 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. 
+func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. 
+ + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 0000000..b105f80 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. 
+ // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go new file mode 100644 index 0000000..e6cd0ce --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/guts.go @@ -0,0 +1,50 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpguts provides functions implementing various details +// of the HTTP specification. 
+// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. +package httpguts + +import ( + "net/textproto" + "strings" +) + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See RFC 7230, Section 4.1.2 +func ValidTrailerHeader(name string) bool { + name = textproto.CanonicalMIMEHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go new file mode 100644 index 0000000..e7de24e --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -0,0 +1,346 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpguts + +import ( + "net" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. 
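ValidTrailerHeader and HeaderValuesContainsToken are the helpers most callers touch directly. A small sketch of their behavior, assuming only the exported httpguts API in this file; the expected outputs follow from the badTrailer table above and the token-matching helpers that come next:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// Trailer names are canonicalized before the check, so the blacklist
	// lookup is effectively case-insensitive.
	fmt.Println(httpguts.ValidTrailerHeader("X-Checksum"))     // true
	fmt.Println(httpguts.ValidTrailerHeader("content-length")) // false: in badTrailer
	fmt.Println(httpguts.ValidTrailerHeader("If-Match"))       // false: "If-" prefix

	// Token matching splits on commas, trims optional whitespace, and
	// compares ASCII case-insensitively.
	values := []string{"keep-alive, Upgrade"}
	fmt.Println(httpguts.HeaderValuesContainsToken(values, "upgrade")) // true
	fmt.Println(httpguts.HeaderValuesContainsToken(values, "close"))   // false
}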
+ for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. + return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. 
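ValidHeaderFieldName and ValidHostHeader reduce to byte-table lookups. A brief sketch, again assuming only the exported functions above; the host results follow from the validHostByte table that comes next:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	fmt.Println(httpguts.ValidHeaderFieldName("X-Request-Id")) // true: all tchar
	fmt.Println(httpguts.ValidHeaderFieldName("Bad Header"))   // false: space is not a tchar
	fmt.Println(httpguts.ValidHeaderFieldName(""))             // false: empty name

	fmt.Println(httpguts.ValidHostHeader("example.com:8080")) // true
	fmt.Println(httpguts.ValidHostHeader("example.com/path")) // false: '/' is not allowed
}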
+var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. 
+ return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore new file mode 100644 index 0000000..190f122 --- /dev/null +++ b/vendor/golang.org/x/net/http2/.gitignore @@ -0,0 +1,2 @@ +*~ +h2i/h2i diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile new file mode 100644 index 0000000..53fc525 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Dockerfile @@ -0,0 +1,51 @@ +# +# This Dockerfile builds a recent curl with HTTP/2 client support, using +# a recent nghttp2 build. +# +# See the Makefile for how to tag it. If Docker and that image is found, the +# Go tests use this curl binary for integration tests. +# + +FROM ubuntu:trusty + +RUN apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y git-core build-essential wget + +RUN apt-get install -y --no-install-recommends \ + autotools-dev libtool pkg-config zlib1g-dev \ + libcunit1-dev libssl-dev libxml2-dev libevent-dev \ + automake autoconf + +# The list of packages nghttp2 recommends for h2load: +RUN apt-get install -y --no-install-recommends make binutils \ + autoconf automake autotools-dev \ + libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ + libev-dev libevent-dev libjansson-dev libjemalloc-dev \ + cython python3.4-dev python-setuptools + +# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: +ENV NGHTTP2_VER 895da9a +RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git + +WORKDIR /root/nghttp2 +RUN git reset --hard $NGHTTP2_VER +RUN autoreconf -i +RUN automake +RUN autoconf +RUN ./configure +RUN make +RUN make install + +WORKDIR /root +RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN tar -zxvf curl-7.45.0.tar.gz +WORKDIR /root/curl-7.45.0 +RUN ./configure --with-ssl --with-nghttp2=/usr/local +RUN make +RUN make install +RUN ldconfig + +CMD ["-h"] +ENTRYPOINT ["/usr/local/bin/curl"] + diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile new file mode 100644 index 0000000..55fd826 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Makefile @@ -0,0 +1,3 @@ +curlimage: + docker build -t gohttp2/curl . + diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README new file mode 100644 index 0000000..360d5aa --- /dev/null +++ b/vendor/golang.org/x/net/http2/README @@ -0,0 +1,20 @@ +This is a work-in-progress HTTP/2 implementation for Go. + +It will eventually live in the Go standard library and won't require +any changes to your code to use. It will just be automatic. + +Status: + +* The server support is pretty good. A few things are missing + but are being worked on. +* The client work has just started but shares a lot of code + is coming along much quicker. + +Docs are at https://godoc.org/golang.org/x/net/http2 + +Demo test server at https://http2.golang.org/ + +Help & bug reports welcome! + +Contributing: https://golang.org/doc/contribute.html +Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go new file mode 100644 index 0000000..c9a0cf3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/ciphers.go @@ -0,0 +1,641 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
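ValidHeaderFieldValue and PunycodeHostPort round out the exported httpguts helpers above. A hedged sketch of both; the non-ASCII hostname is only an illustrative value, and its exact Punycode form comes from the idna package this file imports:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// Control bytes such as CR and LF are rejected; horizontal tab passes
	// because it counts as linear whitespace.
	fmt.Println(httpguts.ValidHeaderFieldValue("ok\tvalue"))    // true
	fmt.Println(httpguts.ValidHeaderFieldValue("bad\r\nvalue")) // false

	// ASCII hosts pass through untouched; IDN hosts are converted and any
	// ":port" suffix is preserved.
	fmt.Println(httpguts.PunycodeHostPort("example.com:443")) // example.com:443 <nil>
	host, err := httpguts.PunycodeHostPort("bücher.example:8443")
	fmt.Println(host, err) // xn--bcher-kva.example:8443 <nil> (per the idna package)
}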
+ +package http2 + +// A list of the possible cipher suite ids. Taken from +// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + 
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 
0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 
0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + 
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F 
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 0000000..f4d9b5e --- /dev/null +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,282 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. +type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + +// TODO: use singleflight for dialing and addConnCalls? +type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. 
share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +// shouldTraceGetConn reports whether getClientConn should call any +// ClientTrace.GetConn hook associated with the http.Request. +// +// This complexity is needed to avoid double calls of the GetConn hook +// during the back-and-forth between net/http and x/net/http2 (when the +// net/http.Transport is upgraded to also speak http2), as well as support +// the case where x/net/http2 is being used directly. +func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { + // If our Transport wasn't made via ConfigureTransport, always + // trace the GetConn hook if provided, because that means the + // http2 package is being used directly and it's the one + // dialing, as opposed to net/http. + if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { + return true + } + // Otherwise, only use the GetConn hook if this connection has + // been used previously for other requests. For fresh + // connections, the net/http package does the dialing. + return !st.freshConn +} + +func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + traceGetConn(req, addr) + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } + p.mu.Lock() + for _, cc := range p.conns[addr] { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + traceGetConn(req, addr) + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. 
Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. 
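The dialCall machinery above coalesces concurrent dials to the same address so that only one connection attempt is in flight per host, and every waiter shares its result. A standalone sketch of that pattern against the standard library only; the pool and call types here are illustrative and are not part of x/net/http2:

package main

import (
	"fmt"
	"net"
	"sync"
)

// call plays the role of dialCall: one in-flight dial whose result is
// shared by every goroutine that asked for the same address.
type call struct {
	done chan struct{} // closed when the dial finishes
	conn net.Conn      // valid after done is closed
	err  error         // valid after done is closed
}

type pool struct {
	mu      sync.Mutex
	dialing map[string]*call // currently in-flight dials, keyed by address
}

// get joins an existing in-flight dial for addr or starts a new one, so
// concurrent callers share a single connection attempt.
func (p *pool) get(addr string) (net.Conn, error) {
	p.mu.Lock()
	if c, ok := p.dialing[addr]; ok {
		p.mu.Unlock()
		<-c.done
		return c.conn, c.err
	}
	c := &call{done: make(chan struct{})}
	if p.dialing == nil {
		p.dialing = make(map[string]*call)
	}
	p.dialing[addr] = c
	p.mu.Unlock()

	c.conn, c.err = net.Dial("tcp", addr)
	close(c.done)

	p.mu.Lock()
	delete(p.dialing, addr)
	p.mu.Unlock()
	return c.conn, c.err
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	p := &pool{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			conn, err := p.get(ln.Addr().String())
			if err != nil {
				fmt.Println("dial failed:", err)
				return
			}
			// All three goroutines observe the same shared connection.
			fmt.Println("shared conn to", conn.RemoteAddr())
		}()
	}
	wg.Wait()
}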
+type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 0000000..a3067f8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. 
+ if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). + want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 0000000..71f2c46 --- /dev/null +++ b/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,133 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. +type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. 
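ConnectionError above and the StreamError type that follows are both exported, so callers of this package can type-switch on them to decide whether a failure is scoped to a single stream or tears down the whole connection. A small illustrative sketch (the classify helper is hypothetical, not part of this package):

    package main

    import (
        "fmt"

        "golang.org/x/net/http2"
    )

    // classify gives a rough handling hint for errors surfaced by the
    // http2 package; illustrative only, not exhaustive.
    func classify(err error) string {
        switch e := err.(type) {
        case http2.ConnectionError:
            return fmt.Sprintf("connection-level error: %v", http2.ErrCode(e))
        case http2.StreamError:
            return fmt.Sprintf("stream %d error: %v", e.StreamID, e.Code)
        default:
            return fmt.Sprintf("other error: %v", err)
        }
    }

    func main() {
        fmt.Println(classify(http2.ConnectionError(http2.ErrCodeProtocol)))
        fmt.Println(classify(http2.StreamError{StreamID: 3, Code: http2.ErrCodeCancel}))
    }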
+type StreamError struct { + StreamID uint32 + Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} +} + +func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." +type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connError represents an HTTP/2 ConnectionError error code, along +// with a string (for debugging) explaining why. +// +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(Code), after stashing away +// the Reason into the Framer's errDetail field, accessible via +// the (*Framer).ErrorDetail method. +type connError struct { + Code ErrCode // the ConnectionError error code + Reason string // additional reason +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 0000000..cea601f --- /dev/null +++ b/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. 
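flow and its methods are unexported, so the sketch below simply restates the same accounting with stand-in names to show how a per-stream window is capped by the shared connection-level window; it is illustrative only and not part of this package:

    package main

    import "fmt"

    // window mirrors the flow type above: a per-stream byte budget that
    // may be capped by a shared connection-level budget.
    type window struct {
        n    int32
        conn *window // nil for the connection-level window itself
    }

    func (w *window) available() int32 {
        n := w.n
        if w.conn != nil && w.conn.n < n {
            n = w.conn.n
        }
        return n
    }

    func (w *window) take(n int32) {
        w.n -= n
        if w.conn != nil {
            w.conn.n -= n
        }
    }

    func main() {
        conn := &window{n: 100}
        stream := &window{n: 1000, conn: conn}
        fmt.Println(stream.available()) // 100: the connection window is the limit
        stream.take(60)
        fmt.Println(stream.available()) // 40: both windows were debited
    }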
+func (f *flow) add(n int32) bool { + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true + } + return false +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 0000000..b46791d --- /dev/null +++ b/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1614 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. +const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). 
+type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. +// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. +func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1< 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) +} + +func (f *Framer) endWrite() error { + // Now that we know the final size, fill in the FrameHeader in + // the space previously reserved for it. Abuse append. + length := len(f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return ErrFrameTooLarge + } + _ = append(f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if f.logWrites { + f.logWrite() + } + + n, err := f.w.Write(f.wbuf) + if err == nil && n != len(f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) + return + } + f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + +func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } +func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) 
} +func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } +func (f *Framer) writeUint32(v uint32) { + f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. +func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. +func (fr *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + fr.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + +// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. +var ErrFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. 
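The exported Framer API is symmetric: frames written with the Write* methods can be read back with ReadFrame. A small usage sketch, writing a SETTINGS frame into a buffer and reading it back; WriteSettings and SettingsFrame are defined later in this file, while Setting and SettingMaxFrameSize are exported from the package's other files (not shown in this hunk):

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/net/http2"
    )

    func main() {
        var buf bytes.Buffer
        fr := http2.NewFramer(&buf, &buf) // frames written to buf are read back from buf

        if err := fr.WriteSettings(http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14}); err != nil {
            panic(err)
        }
        f, err := fr.ReadFrame()
        if err != nil {
            panic(err)
        }
        sf := f.(*http2.SettingsFrame)
        sf.ForeachSetting(func(s http2.Setting) error {
            fmt.Println(s.ID, s.Val) // MAX_FRAME_SIZE 16384
            return nil
        })
    }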
+func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil + if fr.lastFrame != nil { + fr.lastFrame.invalidate() + } + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return nil, err + } + if fh.Length > fr.maxReadSize { + return nil, ErrFrameTooLarge + } + payload := fr.getReadBuf(fh.Length) + if _, err := io.ReadFull(fr.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) + if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } + return nil, err + } + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } + return f, nil +} + +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. 
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := fc.getDataFrame() + f.FrameHeader = fh + + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + return f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteData writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." + return errPadBytes + } + } + } + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + if pad != nil { + flags |= FlagDataPadded + } + f.startWrite(FrameData, flags, streamID) + if pad != nil { + f.wbuf = append(f.wbuf, byte(len(pad))) + } + f.wbuf = append(f.wbuf, data...) + f.wbuf = append(f.wbuf, pad...) + return f.endWrite() +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. +// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. 
The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if s := f.Setting(i); s.ID == id { + return s.Val, true + } + } + return 0, false +} + +// Setting returns the setting from the frame at the given 0-based index. +// The index must be >= 0 and less than f.NumSettings(). +func (f *SettingsFrame) Setting(i int) Setting { + buf := f.p + return Setting{ + ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), + } +} + +func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } + +// HasDuplicates reports whether f contains any duplicate setting IDs. +func (f *SettingsFrame) HasDuplicates() bool { + num := f.NumSettings() + if num == 0 { + return false + } + // If it's small enough (the common case), just do the n^2 + // thing and avoid a map allocation. + if num < 10 { + for i := 0; i < num; i++ { + idi := f.Setting(i).ID + for j := i + 1; j < num; j++ { + idj := f.Setting(j).ID + if idi == idj { + return true + } + } + } + return false + } + seen := map[SettingID]bool{} + for i := 0; i < num; i++ { + id := f.Setting(i).ID + if seen[id] { + return true + } + seen[id] = true + } + return false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. +func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if err := fn(f.Setting(i)); err != nil { + return err + } + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. 
+// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See http://http2.github.io/http2-spec/#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). 
+ if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. +// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + f.startWrite(FrameWindowUpdate, 0, streamID) + f.writeUint32(incr) + return f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. + Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + return nil, err + } + } + if len(p)-int(padLength) <= 0 { + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. 
+// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { + return errDepStreamID + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Priority.Weight) + } + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// A PriorityFrame specifies the sender-advised priority of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.3 +type PriorityFrame struct { + FrameHeader + PriorityParam +} + +// PriorityParam are the stream prioritzation parameters. +type PriorityParam struct { + // StreamDep is a 31-bit stream identifier for the + // stream that this stream depends on. Zero means no + // dependency. + StreamDep uint32 + + // Exclusive is whether the dependency is exclusive. + Exclusive bool + + // Weight is the stream's zero-indexed weight. It should be + // set together with StreamDep, or neither should be set. Per + // the spec, "Add one to the value to obtain a weight between + // 1 and 256." + Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. 
+// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FrameRSTStream, 0, streamID) + f.writeUint32(uint32(code)) + return f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See http://http2.github.io/http2-spec/#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + f.startWrite(FrameContinuation, flags, streamID) + f.wbuf = append(f.wbuf, headerBlockFragment...) + return f.endWrite() +} + +// A PushPromiseFrame is used to initiate a server stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.6 +type PushPromiseFrame struct { + FrameHeader + PromiseID uint32 + headerFragBuf []byte // not owned +} + +func (f *PushPromiseFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *PushPromiseFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) +} + +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + pp := &PushPromiseFrame{ + FrameHeader: fh, + } + if pp.StreamID == 0 { + // PUSH_PROMISE frames MUST be associated with an existing, + // peer-initiated stream. The stream identifier of a + // PUSH_PROMISE frame indicates the stream it is associated + // with. If the stream identifier field specifies the value + // 0x0, a recipient MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + // The PUSH_PROMISE frame includes optional padding. 
+ // Padding fields and flags are identical to those defined for DATA frames + var padLength uint8 + if fh.Flags.Has(FlagPushPromisePadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + + p, pp.PromiseID, err = readUint32(p) + if err != nil { + return + } + pp.PromiseID = pp.PromiseID & (1<<31 - 1) + + if int(padLength) > len(p) { + // like the DATA frame, error out if padding is longer than the body. + return nil, ConnectionError(ErrCodeProtocol) + } + pp.headerFragBuf = p[:len(p)-int(padLength)] + return pp, nil +} + +// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. +type PushPromiseParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + + // PromiseID is the required Stream ID which this + // Push Promises + PromiseID uint32 + + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 +} + +// WritePushPromise writes a single PushPromise Frame. +// +// As with Header Frames, This is the low level call for writing +// individual frames. Continuation frames are handled elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePushPromise(p PushPromiseParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagPushPromisePadded + } + if p.EndHeaders { + flags |= FlagPushPromiseEndHeaders + } + f.startWrite(FramePushPromise, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { + return errStreamID + } + f.writeUint32(p.PromiseID) + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// WriteRawFrame writes a raw frame. This can be used to write +// extension frames unknown to this package. +func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + f.startWrite(t, flags, streamID) + f.writeBytes(payload) + return f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. 
+ // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. 
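Setting Framer.ReadMetaHeaders to an hpack.Decoder is what turns a raw HEADERS/CONTINUATION sequence into the MetaHeadersFrame described above. A small usage sketch, assuming the rest of the vendored package (the hpack Decoder and the HeadersFrameParam writer shown earlier) is available; error checks are omitted where the underlying writes go to a bytes.Buffer and cannot fail:

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/net/http2"
        "golang.org/x/net/http2/hpack"
    )

    func main() {
        var buf bytes.Buffer
        fr := http2.NewFramer(&buf, &buf)

        // Encode a tiny header block and write it as a HEADERS frame.
        var hbuf bytes.Buffer
        enc := hpack.NewEncoder(&hbuf)
        enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
        enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
        fr.WriteHeaders(http2.HeadersFrameParam{
            StreamID:      1,
            BlockFragment: hbuf.Bytes(),
            EndHeaders:    true,
        })

        // With ReadMetaHeaders set, ReadFrame decodes the block and
        // returns a *MetaHeadersFrame instead of a *HeadersFrame.
        fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
        f, err := fr.ReadFrame()
        if err != nil {
            panic(err)
        }
        mh := f.(*http2.MetaHeadersFrame)
        fmt.Println(mh.PseudoValue("method"), mh.PseudoValue("path")) // GET /
    }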
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if VerboseLogs && fr.logReads { + fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) + } + if !httpguts.ValidHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validWireHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 0000000..3a13101 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 0000000..9933c9f --- /dev/null +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. + +package http2 + +import ( + "bytes" + "errors" + "fmt" + "os" + "runtime" + "strconv" + "sync" +) + +var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" + +type goroutineLock uint64 + +func newGoroutineLock() goroutineLock { + if !DebugGoroutines { + return 0 + } + return goroutineLock(curGoroutineID()) +} + +func (g goroutineLock) check() { + if !DebugGoroutines { + return + } + if curGoroutineID() != uint64(g) { + panic("running on the wrong goroutine") + } +} + +func (g goroutineLock) checkNotOn() { + if !DebugGoroutines { + return + } + if curGoroutineID() == uint64(g) { + panic("running on the wrong goroutine") + } +} + +var goroutineSpace = []byte("goroutine ") + +func curGoroutineID() uint64 { + bp := littleBuf.Get().(*[]byte) + defer littleBuf.Put(bp) + b := *bp + b = b[:runtime.Stack(b, false)] + // Parse the 4707 out of "goroutine 4707 [" + b = bytes.TrimPrefix(b, goroutineSpace) + i := bytes.IndexByte(b, ' ') + if i < 0 { + panic(fmt.Sprintf("No space found in %q", b)) + } + b = b[:i] + n, err := parseUintBytes(b, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) + } + return n +} + +var littleBuf = sync.Pool{ + New: func() interface{} { + buf := make([]byte, 64) + return &buf + }, +} + +// parseUintBytes is like strconv.ParseUint, but using a []byte. +func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { + var cutoff, maxVal uint64 + + if bitSize == 0 { + bitSize = int(strconv.IntSize) + } + + s0 := s + switch { + case len(s) < 1: + err = strconv.ErrSyntax + goto Error + + case 2 <= base && base <= 36: + // valid base; nothing to do + + case base == 0: + // Look for octal, hex prefix. 
+ switch { + case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): + base = 16 + s = s[2:] + if len(s) < 1 { + err = strconv.ErrSyntax + goto Error + } + case s[0] == '0': + base = 8 + default: + base = 10 + } + + default: + err = errors.New("invalid base " + strconv.Itoa(base)) + goto Error + } + + n = 0 + cutoff = cutoff64(base) + maxVal = 1<= base { + n = 0 + err = strconv.ErrSyntax + goto Error + } + + if n >= cutoff { + // n*base overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n *= uint64(base) + + n1 := n + uint64(v) + if n1 < n || n1 > maxVal { + // n+v overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n = n1 + } + + return n, nil + +Error: + return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} +} + +// Return the first number n such that n*base >= 1<<64. +func cutoff64(base int) uint64 { + if base < 2 { + return 0 + } + return (1<<64-1)/uint64(base) + 1 +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 0000000..c3ff3fa --- /dev/null +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -0,0 +1,88 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "net/http" + "strings" + "sync" +) + +var ( + commonBuildOnce sync.Once + commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case + commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case +) + +func buildCommonHeaderMapsOnce() { + commonBuildOnce.Do(buildCommonHeaderMaps) +} + +func buildCommonHeaderMaps() { + common := []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } + commonLowerHeader = make(map[string]string, len(common)) + commonCanonHeader = make(map[string]string, len(common)) + for _, v := range common { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + buildCommonHeaderMapsOnce() + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 0000000..1565cf2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,240 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.table.init() + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. +func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both stable and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true + } + + j, nameValueMatch := e.dynTab.table.search(f) + if nameValueMatch || (i == 0 && j != 0) { + return j + uint64(staticTable.len()), nameValueMatch + } + + return i, false +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same size of the default dynamic header table +// size described in HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v. 
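// Illustrative sketch (not part of the vendored file): a round trip through
// this package's exported API, using NewEncoder/WriteField to produce a
// header block and NewDecoder/DecodeFull to read it back. The header values
// below are arbitrary examples.
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	fields := []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: "user-agent", Value: "sketch/0.1"},
		// Sensitive fields are written as "Never Indexed", so they never
		// enter a dynamic table.
		{Name: "authorization", Value: "Bearer example-token", Sensitive: true},
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			log.Fatal(err)
		}
	}

	dec := hpack.NewDecoder(4096, nil) // 4096: the initial dynamic table size
	got, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range got {
		fmt.Println(f)
	}
}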
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Inremental Indexing" +// representation is used. +func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the extended buffer. +// +// s will be encoded in Huffman codes only when it produces strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. 
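// Illustrative sketch (not part of the vendored file): appendHpackString's
// "Huffman only when strictly shorter" rule, shown with this package's
// exported helpers. Per RFC 7541 Appendix C.4.1, "www.example.com" is 15 raw
// octets but only 12 octets after Huffman coding, so it would be sent in
// Huffman form.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"
	fmt.Println(len(s), hpack.HuffmanEncodeLength(s)) // 15 12

	encoded := hpack.AppendHuffmanString(nil, s)
	decoded, err := hpack.HuffmanDecodeToString(encoded)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded == s) // true
}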
+func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 0000000..166788c --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,496 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. 
+ saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. 
+ if i == 0 { + return + } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } + if i > uint64(d.maxTableIndex()) { + return + } + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? 
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + return d.parseDynamicTableSizeUpdate() + } + + return DecodingError{errors.New("invalid encoding")} +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldIndexed() error { + buf := d.buf + idx, buf, err := readVarInt(7, buf) + if err != nil { + return err + } + hf, ok := d.at(idx) + if !ok { + return DecodingError{InvalidIndexError(idx)} + } + d.buf = buf + return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { + buf := d.buf + nameIdx, buf, err := readVarInt(n, buf) + if err != nil { + return err + } + + var hf HeaderField + wantStr := d.emitEnabled || it.indexed() + if nameIdx > 0 { + ihf, ok := d.at(nameIdx) + if !ok { + return DecodingError{InvalidIndexError(nameIdx)} + } + hf.Name = ihf.Name + } else { + hf.Name, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + } + hf.Value, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + d.buf = buf + if it.indexed() { + d.dynTab.add(hf) + } + hf.Sensitive = it.sensitive() + return d.callEmit(hf) +} + +func (d *Decoder) callEmit(hf HeaderField) error { + if d.maxStrLen != 0 { + if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { + return ErrStringLength + } + } + if d.emitEnabled { + d.emit(hf) + } + return nil +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseDynamicTableSizeUpdate() error { + // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the + // beginning of the first header block following the change to the dynamic table size. + if d.dynTab.size > 0 { + return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} + } + + buf := d.buf + size, buf, err := readVarInt(5, buf) + if err != nil { + return err + } + if size > uint64(d.dynTab.allowedMaxSize) { + return DecodingError{errors.New("dynamic table size update too large")} + } + d.dynTab.setMaxSize(uint32(size)) + d.buf = buf + return nil +} + +var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} + +// readVarInt reads an unsigned variable length integer off the +// beginning of p. n is the parameter as described in +// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// +// n must always be between 1 and 8. +// +// The returned remain buffer is either a smaller suffix of p, or err != nil. +// The error is errNeedMore if p doesn't contain a complete integer. 
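// Illustrative sketch (not part of the vendored file): the prefix-integer
// coding that readVarInt (below) and appendVarInt (in encode.go) implement,
// per RFC 7541 Section 5.1. encodePrefixInt and decodePrefixInt are
// hypothetical names used only in this sketch, not functions of the hpack
// package. Worked example from RFC 7541 C.1.2: 1337 with a 5-bit prefix
// encodes to the octets 0x1f 0x9a 0x0a.
package main

import "fmt"

// encodePrefixInt encodes i using an n-bit prefix (1 <= n <= 8).
func encodePrefixInt(n uint, i uint64) []byte {
	max := uint64(1)<<n - 1
	if i < max {
		return []byte{byte(i)}
	}
	out := []byte{byte(max)} // prefix saturated: continuation bytes follow
	i -= max
	for i >= 128 {
		out = append(out, byte(0x80|(i&0x7f)))
		i >>= 7
	}
	return append(out, byte(i))
}

// decodePrefixInt reverses encodePrefixInt (no error handling in this sketch).
func decodePrefixInt(n uint, p []byte) uint64 {
	max := uint64(1)<<n - 1
	i := uint64(p[0]) & max
	if i < max {
		return i
	}
	var m uint
	for _, b := range p[1:] {
		i += uint64(b&0x7f) << m
		if b&0x80 == 0 {
			break
		}
		m += 7
	}
	return i
}

func main() {
	enc := encodePrefixInt(5, 1337)
	fmt.Printf("% x\n", enc)             // 1f 9a 0a
	fmt.Println(decodePrefixInt(5, enc)) // 1337
}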
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { + if n < 1 || n > 8 { + panic("bad n") + } + if len(p) == 0 { + return 0, p, errNeedMore + } + i = uint64(p[0]) + if n < 8 { + i &= (1 << uint64(n)) - 1 + } + if i < (1< 0 { + b := p[0] + p = p[1:] + i += uint64(b&127) << m + if b&128 == 0 { + return i, p, nil + } + m += 7 + if m >= 63 { // TODO: proper overflow check. making this up. + return 0, origP, errVarintOverflow + } + } + return 0, origP, errNeedMore +} + +// readString decodes an hpack string from p. +// +// wantStr is whether s will be used. If false, decompression and +// []byte->string garbage are skipped if s will be ignored +// anyway. This does mean that huffman decoding errors for non-indexed +// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server +// is returning an error anyway, and because they're not indexed, the error +// won't affect the decoding state. +func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { + if len(p) == 0 { + return "", p, errNeedMore + } + isHuff := p[0]&128 != 0 + strLen, p, err := readVarInt(7, p) + if err != nil { + return "", p, err + } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } + if uint64(len(p)) < strLen { + return "", p, errNeedMore + } + if !isHuff { + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil + } + + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC + } + return s, p[strLen:], nil +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 0000000..b412a96 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,222 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + rootHuffmanNode := getRootHuffmanNode() + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. 
+ // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. + cur, cbits, sbits := uint(0), uint8(0), uint8(0) + for _, b := range v { + cur = cur<<8 | uint(b) + cbits += 8 + sbits += 8 + for cbits >= 8 { + idx := byte(cur >> (cbits - 8)) + n = n.children[idx] + if n == nil { + return ErrInvalidHuffman + } + if n.children == nil { + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } + buf.WriteByte(n.sym) + cbits -= n.codeLen + n = rootHuffmanNode + sbits = cbits + } else { + cbits -= 8 + } + } + } + for cbits > 0 { + n = n.children[byte(cur<<(8-cbits))] + if n == nil { + return ErrInvalidHuffman + } + if n.children != nil || n.codeLen > cbits { + break + } + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } + buf.WriteByte(n.sym) + cbits -= n.codeLen + n = rootHuffmanNode + sbits = cbits + } + if sbits > 7 { + // Either there was an incomplete symbol, or overlong padding. + // Both are decoding errors per RFC 7541 section 5.2. + return ErrInvalidHuffman + } + if mask := uint(1< 8 { + codeLen -= 8 + i := uint8(code >> codeLen) + if cur.children[i] == nil { + cur.children[i] = newInternalNode() + } + cur = cur.children[i] + } + shift := 8 - codeLen + start, end := int(uint8(code<> (nbits - rembits)) + dst[len(dst)-1] |= t + } + + return dst +} + +// HuffmanEncodeLength returns the number of bytes required to encode +// s in Huffman codes. The result is round up to byte boundary. +func HuffmanEncodeLength(s string) uint64 { + n := uint64(0) + for i := 0; i < len(s); i++ { + n += uint64(huffmanCodeLen[s[i]]) + } + return (n + 7) / 8 +} + +// appendByteToHuffmanCode appends Huffman code for c to dst and +// returns the extended buffer and the remaining bits in the last +// element. The appending is not byte aligned and the remaining bits +// in the last element of dst is given in rembits. +func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { + code := huffmanCodes[c] + nbits := huffmanCodeLen[c] + + for { + if rembits > nbits { + t := uint8(code << (rembits - nbits)) + dst[len(dst)-1] |= t + rembits -= nbits + break + } + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t + + nbits -= rembits + rembits = 8 + + if nbits == 0 { + break + } + + dst = append(dst, 0) + } + + return dst, rembits +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go new file mode 100644 index 0000000..a66cfbe --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -0,0 +1,479 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "fmt" +) + +// headerFieldTable implements a list of HeaderFields. +// This is used to implement the static and dynamic tables. +type headerFieldTable struct { + // For static tables, entries are never evicted. + // + // For dynamic tables, entries are evicted from ents[0] and added to the end. + // Each entry has a unique id that starts at one and increments for each + // entry that is added. This unique id is stable across evictions, meaning + // it can be used as a pointer to a specific entry. As in hpack, unique ids + // are 1-based. The unique id for ents[k] is k + evictCount + 1. + // + // Zero is not a valid unique id. + // + // evictCount should not overflow in any remotely practical situation. 
In + // practice, we will have one dynamic table per HTTP/2 connection. If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
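// Illustrative sketch (not part of the vendored file): the unique-id to
// HPACK-index arithmetic documented above, worked through by hand for a
// dynamic table. Suppose evictCount = 2 and the table holds three live
// entries, so ents[0..2] carry unique ids 3, 4 and 5 (id = k + evictCount + 1).
// idToIndex maps id to len - (id - evictCount - 1), so the newest entry
// (ents[2], id 5) gets dynamic-table index 1 and the oldest (ents[0], id 3)
// gets index 3, matching HPACK's "index 1 is the newest dynamic entry" rule.
// The encoder later adds staticTable.len() to obtain the absolute index.
package main

import "fmt"

func main() {
	const evictCount, tableLen = 2, 3
	for k := 0; k < tableLen; k++ {
		id := k + evictCount + 1
		dynIndex := tableLen - (id - evictCount - 1)
		fmt.Printf("ents[%d]: id=%d -> dynamic-table index %d\n", k, id, dynIndex)
	}
}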
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 
0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 0000000..bdaba1d --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,384 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. 
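// Illustrative sketch (not part of the vendored file): wiring this package
// into a net/http server explicitly via ConfigureServer, as the package
// comment above describes. The address and certificate paths are
// placeholders for this sketch.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("proto: " + r.Proto + "\n")) // "HTTP/2.0" for h2 requests
	})

	srv := &http.Server{Addr: ":8443", Handler: mux}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	// TLS is required for the "h2" ALPN protocol this package negotiates.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}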
+// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
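// Illustrative sketch (not part of the vendored file): how the exported
// Setting type reports protocol violations via Valid (below), using the
// limits from RFC 7540 Section 6.5.2. The values are arbitrary examples.
package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	ok := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14}  // 16384, the minimum allowed
	bad := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 24} // exceeds 2^24-1
	push := http2.Setting{ID: http2.SettingEnablePush, Val: 2}        // must be 0 or 1

	fmt.Println(ok, ok.Valid())     // [MAX_FRAME_SIZE = 16384] <nil>
	fmt.Println(bad, bad.Valid())   // non-nil connection error (PROTOCOL_ERROR)
	fmt.Println(push, push.Valid()) // non-nil connection error (PROTOCOL_ERROR)
}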
+func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httpguts.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httpguts.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +func httpCodeString(code int) string { + switch code { + case 200: + return "200" + case 404: + return "404" + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. 
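// Illustrative sketch (not part of the vendored file): the lowercase rule
// that validWireHeaderFieldName (above) enforces for HTTP/2 wire header
// names. wireHeaderNameOK is a hypothetical name for this sketch; it mirrors
// the same check using the exported httpguts helper.
package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

// wireHeaderNameOK reports whether v is a token with no uppercase letters.
func wireHeaderNameOK(v string) bool {
	if len(v) == 0 {
		return false
	}
	for _, r := range v {
		if !httpguts.IsTokenRune(r) || ('A' <= r && r <= 'Z') {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(wireHeaderNameOK("content-type")) // true
	fmt.Println(wireHeaderNameOK("Content-Type")) // false: HTTP/2 requires lowercase on the wire
	fmt.Println(wireHeaderNameOK("bad header"))   // false: space is not a token rune
}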
+type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 0000000..161bca7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,20 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 0000000..a614009 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,163 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return 0 + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. 
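// Illustrative sketch (not part of the vendored file): how the pipe type
// above behaves, written the way it might appear in a test inside package
// http2 (pipe and pipeBuffer are unexported, so this compiles only within
// the package, with the usual bytes/io/fmt imports). CloseWithError lets
// buffered data drain before surfacing the error, while BreakWithError
// (below) surfaces it immediately.
func examplePipeUsage() {
	p := &pipe{b: new(bytes.Buffer)} // *bytes.Buffer satisfies pipeBuffer

	p.Write([]byte("hello"))
	p.CloseWithError(io.EOF)

	buf := make([]byte, 8)
	n, err := p.Read(buf)  // n == 5, err == nil: queued data is still returned
	_, err2 := p.Read(buf) // now the stored error: io.EOF
	fmt.Println(n, err, err2)
}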
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 0000000..b57b6e2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2901 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. 
+ +package http2 + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. 
+ state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if h1, h2 := s, conf; h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + s.RegisterOnShutdown(conf.state.startGracefulShutdown) + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. 
Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. 
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. 
+ } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx context.Context + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. 
+type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx context.Context + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. 
+ str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + buildCommonHeaderMapsOnce() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. 
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errPrefaceTimeout + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. 
+// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. 
(We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. 
+func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests uses the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. 
CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." 
+ return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if f.NumSettings() > 100 || f.HasDuplicates() { + // This isn't actually in the spec, but hang up on + // suspiciously large settings frames or those with + // duplicate entries. + return ConnectionError(ErrCodeProtocol) + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." 
+ if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + // RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in + // "open" or "half-closed (local)" state, the recipient MUST respond with a + // stream error (Section 5.4.2) of type STREAM_CLOSED. + if state == stateClosed { + return streamError(id, ErrCodeStreamClosed) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? 
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the + // value of a content-length header field does not equal the sum of the + // DATA frame payload lengths that form the body. + return streamError(id, ErrCodeProtocol) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. 
If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than + // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in + // this state, it MUST respond with a stream error (Section 5.4.2) of + // type STREAM_CLOSED. + if st.state == stateHalfClosedRemote { + return streamError(id, ErrCodeStreamClosed) + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. 
+ if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !httpguts.ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. + return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := context.WithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = req.WithContext(st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if e != nil && e != http.ErrAbortHandler { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
+type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !httpguts.ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. 
+// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. + // TODO: remove more Connection-specific header fields here, in addition + // to "Connection". + if _, ok := rws.snapHeader["Connection"]; ok { + v := rws.snapHeader.Get("Connection") + delete(rws.snapHeader, "Connection") + if v == "close" { + rws.conn.startGracefulShutdown() + } + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + rws.dirty = true + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + if err != nil { + rws.dirty = true + } + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. 
If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 7230 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + +func (w *responseWriter) Flush() { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.bw.Buffered() > 0 { + if err := rws.bw.Flush(); err != nil { + // Ignore the error. The frame writer already knows. + return + } + } else { + // The bufio.Writer won't call chunkWriter.Write + // (writeChunk with zero bytes, so we have to do it + // ourselves to force the HTTP response header and/or + // final DATA frame (with END_STREAM) to be sent. + rws.writeChunk(nil) + } +} + +func (w *responseWriter) CloseNotify() <-chan bool { + rws := w.rws + if rws == nil { + panic("CloseNotify called after Handler finished") + } + rws.closeNotifierMu.Lock() + ch := rws.closeNotifierCh + if ch == nil { + ch = make(chan bool, 1) + rws.closeNotifierCh = ch + cw := rws.stream.cw + go func() { + cw.Wait() // wait for close + ch <- true + }() + } + rws.closeNotifierMu.Unlock() + return ch +} + +func (w *responseWriter) Header() http.Header { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.handlerHeader == nil { + rws.handlerHeader = make(http.Header) + } + return rws.handlerHeader +} + +// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. 
+ // In the future we might block things over 599 (600 and above aren't defined + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. (We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + checkWriteHeaderCode(code) + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler might call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. +func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +var _ http.Pusher = (*responseWriter)(nil) + +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." 
+ // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + if opts == nil { + opts = new(http.PushOptions) + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. + switch strings.ToLower(k) { + case "content-length", "content-encoding", "trailer", "te", "expect", "host": + return fmt.Errorf("promised request headers cannot include %q", k) + } + } + if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { + return err + } + + // The RFC effectively limits promised requests to GET and HEAD: + // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" + // http://tools.ietf.org/html/rfc7540#section-8.2 + if opts.Method != "GET" && opts.Method != "HEAD" { + return fmt.Errorf("method %q must be GET or HEAD", opts.Method) + } + + msg := &startPushRequest{ + parent: st, + method: opts.Method, + url: u, + header: cloneHeader(opts.Header), + done: errChanPool.Get().(chan error), + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case sc.serveMsgCh <- msg: + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case err := <-msg.done: + errChanPool.Put(msg.done) + return err + } +} + +type startPushRequest struct { + parent *stream + method string + url *url.URL + header http.Header + done chan error +} + +func (sc *serverConn) startPush(msg *startPushRequest) { + sc.serveG.check() + + // http://tools.ietf.org/html/rfc7540#section-6.6. + // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that + // is in either the "open" or "half-closed (remote)" state. + if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { + // responseWriter.Push checks that the stream is peer-initiaed. + msg.done <- errStreamClosed + return + } + + // http://tools.ietf.org/html/rfc7540#section-6.6. + if !sc.pushEnabled { + msg.done <- http.ErrNotSupported + return + } + + // PUSH_PROMISE frames must be sent in increasing order by stream ID, so + // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE + // is written. Once the ID is allocated, we start the request handler. + allocatePromisedID := func() (uint32, error) { + sc.serveG.check() + + // Check this again, just in case. Technically, we might have received + // an updated SETTINGS by the time we got around to writing this frame. 
+ if !sc.pushEnabled { + return 0, http.ErrNotSupported + } + // http://tools.ietf.org/html/rfc7540#section-6.5.2. + if sc.curPushedStreams+1 > sc.clientMaxStreams { + return 0, ErrPushLimitReached + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.1. + // Streams initiated by the server MUST use even-numbered identifiers. + // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. + promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. 
+func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 0000000..2c9fe88 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2580 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + mathrand "math/rand" + "net" + "net/http" + "net/http/httptrace" + "net/textproto" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allowed. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. 
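	//
	// Illustrative mapping, mirroring maxHeaderListSize below:
	//
	//	0          -> 10 << 20 (the ~10MB default)
	//	0xffffffff -> 0 (advertise no limit)
	//	otherwise  -> the configured value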
+ MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It returns an error if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) + return err +} + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. 
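+//
+// A minimal usage sketch, assuming conn is an established connection that
+// negotiated "h2" (illustrative only; most callers go through the Transport's
+// connection pool instead):
+//
+//	cc, err := t.NewClientConn(conn)
+//	if err != nil {
+//		// handle err
+//	}
+//	res, err := cc.RoundTrip(req)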
+type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closing bool + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. +type clientStream struct { + cc *ClientConn + req *http.Request + trace *httptrace.ClientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu + requestedGzip bool + on100 func() // optional code to run if get a 100 continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + num1xx uint8 // number of 1xx responses seen + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel waits for the user to cancel a request or for the done +// channel to be signaled. A non-nil error is returned only if the request was +// canceled. 
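+//
+// Sketch of the two call sites in this file (illustrative only):
+//
+//	err := awaitRequestCancel(req, cs.done)        // per-stream watcher goroutine
+//	err := awaitRequestCancel(req, waitingForConn) // while waiting for a stream slot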
+func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { + ctx := req.Context() + if req.Cancel == nil && ctx.Done() == nil { + return nil + } + select { + case <-req.Cancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +var got1xxFuncForTests func(int, textproto.MIMEHeader) error + +// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, +// if any. It returns nil if not set or if the Go version is too old. +func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { + if fn := got1xxFuncForTests; fn != nil { + return fn + } + return traceGot1xxResponseFunc(cs.trace) +} + +// awaitRequestCancel waits for the user to cancel a request, its context to +// expire, or for the request to be done (any way it might be removed from the +// cc.streams map: peer reset, successful completion, TCP connection breakage, +// etc). If the request is canceled, then cs will be canceled and closed. +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + if err := awaitRequestCancel(req, cs.done); err != nil { + cs.cancelStream() + cs.bufPipe.CloseWithError(err) + } +} + +func (cs *clientStream) cancelStream() { + cc := cs.cc + cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cc.mu.Unlock() + + if !didReset { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.forgetStreamID(cs.ID) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. 
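	//
	// For example (illustrative only):
	//
	//	res, err := t.RoundTripOpt(req, RoundTripOpt{OnlyCachedConn: true})
	//	if err == ErrNoCachedConn {
	//		// no usable cached connection; caller may choose to dial
	//	}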
+ OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for retry := 0; ; retry++ { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + traceGotConn(req, cc) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + if err != nil && retry <= 6 { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + // After the first retry, do exponential backoff with 10% jitter. + if retry == 0 { + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + select { + case <-time.After(time.Second * time.Duration(backoff)): + continue + case <-req.Context().Done(): + return nil, req.Context().Err() + } + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || req.Body == http.NoBody { + return req, nil + } + + // If the request body can be reset back to its original + // state via the optional req.GetBody, do that. + if req.GetBody != nil { + // TODO: consider a req.Body.Close here? or audit that all caller paths do? 
+ body, err := req.GetBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. The "afterBodyWrite" means the + // bodyWrite process has started, which becomes true before + // the first Read. + if !afterBodyWrite { + return req, nil + } + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *t.TLSClientConfig.Clone() + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return t.t1.ExpectContinueTimeout +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if t.AllowHTTP { + cc.nextStreamID = 3 + } + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. +type clientConnIdleState struct { + canTakeNewRequest bool + freshConn bool // whether it's unused by any previous request +} + +func (cc *ClientConn) idleState() clientConnIdleState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.idleStateLocked() +} + +func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { + if cc.singleUse && cc.nextStreamID > 1 { + return + } + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && + int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 + st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest + return +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + st := cc.idleStateLocked() + return st.canTakeNewRequest +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. 
It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + nextID := cc.nextStreamID + // TODO: do clients send GOAWAY too? maybe? Just Close: + cc.mu.Unlock() + + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } + cc.tconn.Close() +} + +var shutdownEnterWaitStateHook = func() {} + +// Shutdown gracefully close the client connection, waiting for running streams to complete. +func (cc *ClientConn) Shutdown(ctx context.Context) error { + if err := cc.sendGoAway(); err != nil { + return err + } + // Wait for all in-flight streams to complete or connection to close + done := make(chan error, 1) + cancelled := false // guarded by cc.mu + go func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if len(cc.streams) == 0 || cc.closed { + cc.closed = true + done <- cc.tconn.Close() + break + } + if cancelled { + break + } + cc.cond.Wait() + } + }() + shutdownEnterWaitStateHook() + select { + case err := <-done: + return err + case <-ctx.Done(): + cc.mu.Lock() + // Free the goroutine above + cancelled = true + cc.cond.Broadcast() + cc.mu.Unlock() + return ctx.Err() + } +} + +func (cc *ClientConn) sendGoAway() error { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.wmu.Lock() + defer cc.wmu.Unlock() + if cc.closing { + // GOAWAY sent already + return nil + } + // Send a graceful shutdown frame to server + maxStreamID := cc.nextStreamID + if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { + return err + } + if err := cc.bw.Flush(); err != nil { + return err + } + // Prevent new requests + cc.closing = true + return nil +} + +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + cc.mu.Lock() + defer cc.cond.Broadcast() + defer cc.mu.Unlock() + err := errors.New("http2: client connection force closed via ClientConn.Close") + for id, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + cs.bufPipe.CloseWithError(err) + delete(cc.streams, id) + } + cc.closed = true + return cc.tconn.Close() +} + +const maxAllocFrameSize = 512 << 10 + +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. +func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. 
+ if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil || req.Body == http.NoBody { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { + if err := checkConnHeaders(req); err != nil { + return nil, false, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, false, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, false, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. 
+ // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, false, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = httptrace.ContextClientTrace(req.Context()) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, false, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := req.Context() + + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. 
+ bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, false, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.getStartedWrite(), cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, cs.getStartedWrite(), err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + if waitingForConn != nil { + close(waitingForConn) + } + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. 
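A minimal standalone sketch of the slot-waiting pattern the comment above describes: a goroutine blocked in sync.Cond.Wait cannot also select on a channel, so a helper goroutine turns cancellation into a Broadcast. The names waitForSlot and slots are hypothetical and not part of this file.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

// waitForSlot blocks until *slots > 0 or ctx is canceled. The helper
// goroutine exists only to convert ctx cancellation into a cond.Broadcast.
func waitForSlot(ctx context.Context, mu *sync.Mutex, cond *sync.Cond, slots *int) error {
	var cancelErr error
	done := make(chan struct{})
	defer close(done)
	go func() {
		select {
		case <-ctx.Done():
			mu.Lock()
			cancelErr = ctx.Err()
			cond.Broadcast() // wake the waiter so it can observe cancelErr
			mu.Unlock()
		case <-done:
		}
	}()

	mu.Lock()
	defer mu.Unlock()
	for *slots == 0 {
		if cancelErr != nil {
			return cancelErr
		}
		cond.Wait()
	}
	*slots--
	return nil
}

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	slots := 0 // no free stream slots, so the wait can only end by cancellation
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := waitForSlot(ctx, &mu, cond, &slots)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}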
+ if waitingForConn == nil { + waitingForConn = make(chan struct{}) + go func() { + if err := awaitRequestCancel(req, waitingForConn); err != nil { + cc.mu.Lock() + waitingForConnErr = err + cc.cond.Broadcast() + cc.mu.Unlock() + } + }() + } + cc.pendingRequests++ + cc.cond.Wait() + cc.pendingRequests-- + if waitingForConnErr != nil { + return waitingForConnErr + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + traceWroteRequest(cs.trace, err) + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. 
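writeHeaders above slices one HPACK block into a HEADERS frame plus zero or more CONTINUATION frames, setting END_HEADERS only on the last fragment. A small self-contained sketch of just that chunking arithmetic, with frame names rendered as strings for illustration:

package main

import "fmt"

// splitFrames mirrors the chunking loop in writeHeaders: the first fragment
// goes in a HEADERS frame, the rest in CONTINUATION frames, and only the last
// fragment carries END_HEADERS.
func splitFrames(block []byte, maxFrameSize int) (frames []string) {
	first := true
	for len(block) > 0 {
		chunk := block
		if len(chunk) > maxFrameSize {
			chunk = chunk[:maxFrameSize]
		}
		block = block[len(chunk):]
		endHeaders := len(block) == 0
		kind := "CONTINUATION"
		if first {
			kind = "HEADERS"
			first = false
		}
		frames = append(frames, fmt.Sprintf("%s len=%d END_HEADERS=%v", kind, len(chunk), endHeaders))
	}
	return frames
}

func main() {
	// A 40-byte header block with a 16-byte frame limit yields three frames.
	for _, f := range splitFrames(make([]byte, 40), 16) {
		fmt.Println(f) // HEADERS len=16, CONTINUATION len=16, CONTINUATION len=8 END_HEADERS=true
	}
}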
+ return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } + } + + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). 
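encodeHeaders above lowercases every field name, emits the pseudo-headers first, and serializes the result with the connection's HPACK encoder. A minimal sketch of that shape using the vendored golang.org/x/net/http2/hpack package; the header values here are made up.

package main

import (
	"bytes"
	"fmt"
	"strings"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	write := func(name, value string) {
		// HTTP/2 requires lowercase field names on the wire.
		enc.WriteField(hpack.HeaderField{Name: strings.ToLower(name), Value: value})
	}
	// Pseudo-headers must come before regular headers.
	write(":method", "GET")
	write(":scheme", "https")
	write(":authority", "example.com")
	write(":path", "/")
	write("User-Agent", "sketch/0.1")
	fmt.Printf("HPACK block: %d bytes\n", buf.Len())
}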
+ f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + trace := httptrace.ContextClientTrace(req.Context()) + traceHeaders := traceHasWroteHeaderField(trace) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name = strings.ToLower(name) + cc.writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. 
have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. +func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
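Both encodeHeaders and encodeTrailers above measure the header list before encoding it, so a request that exceeds the peer's advertised SETTINGS_MAX_HEADER_LIST_SIZE is rejected without disturbing the stateful HPACK encoder. A sketch of that first pass; peerMaxHeaderListSize is a hypothetical value here, not the field on ClientConn.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// headerListSize sums hpack.HeaderField.Size for each field, which counts
// name + value + 32 bytes of per-entry overhead (RFC 7541, Section 4.1).
func headerListSize(fields map[string]string) uint64 {
	var size uint64
	for k, v := range fields {
		size += uint64(hpack.HeaderField{Name: k, Value: v}.Size())
	}
	return size
}

func main() {
	fields := map[string]string{":method": "GET", ":path": "/", "cookie": "a=b"}
	const peerMaxHeaderListSize = 10 << 20 // e.g. a limit advertised via SETTINGS
	size := headerListSize(fields)
	fmt.Println(size, size <= peerMaxHeaderListSize)
}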
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. 
+ if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 1xx responses). +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + if statusCode >= 100 && statusCode <= 199 { + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? 
unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + res.Uncompressed = true + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. 
+ v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. 
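transportResponseBody.Read above replenishes the connection-level window only once it has dropped below half of its target, then sends a single WINDOW_UPDATE for the difference. A toy version of that policy; targetConnWindow is an illustrative constant, not the actual value of transportDefaultConnFlow.

package main

import "fmt"

const targetConnWindow = 1 << 20 // made-up target for the connection window

// connWindowUpdate reports the WINDOW_UPDATE increment to send, if any,
// given the currently available connection-level window.
func connWindowUpdate(available int32) (increment int32, ok bool) {
	if available < targetConnWindow/2 {
		return targetConnWindow - available, true
	}
	return 0, false
}

func main() {
	fmt.Println(connWindowUpdate(900_000)) // 0 false: still above half the target
	fmt.Println(connWindowUpdate(100_000)) // 948576 true: top the window back up
}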
+ didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + cs.bufPipe.closeWithErrorAndCode(err, code) + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
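When SETTINGS_INITIAL_WINDOW_SIZE changes, processSettings above shifts every open stream's send window by the delta between the new and old values rather than resetting it (RFC 7540, Section 6.9.2). A worked sketch of that adjustment with made-up window numbers:

package main

import "fmt"

func main() {
	oldInitial := int32(65535)
	newInitial := int32(131072)
	// streamID -> currently available send window for each open stream.
	streams := map[uint32]int32{1: 1000, 3: 65535}

	// Apply the delta, preserving how much each stream has already consumed.
	delta := newInitial - oldInitial
	for id := range streams {
		streams[id] += delta
	}
	fmt.Println(streams) // map[1:66537 3:131072]
}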
+ cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := streamError(cs.ID, f.ErrCode) + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + return nil +} + +// Ping sends a PING frame to the server and waits for the ack. +func (cc *ClientConn) Ping(ctx context.Context) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). 
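Ping above keys each in-flight PING on its random 8-byte payload and blocks until the read loop closes the matching channel. A condensed, single-goroutine sketch of that bookkeeping:

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	pings := make(map[[8]byte]chan struct{})

	// Writer side: pick a payload that is not already in flight.
	var p [8]byte
	for {
		if _, err := rand.Read(p[:]); err != nil {
			panic(err)
		}
		if _, dup := pings[p]; !dup {
			break
		}
	}
	ack := make(chan struct{})
	pings[p] = ack

	// Reader side, on receiving a PING ack whose payload matches p:
	if c, ok := pings[p]; ok {
		close(c)
		delete(pings, p)
	}

	<-ack // the waiter unblocks once the ack channel is closed
	fmt.Printf("ping %x acked\n", p)
}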
+ cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httpguts.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. 
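getBodyWriterState above arms the expect-continue timer with a placeholder duration and only starts the real countdown in scheduleBodyWrite; either the timer firing or an early "100 Continue" triggers the one-shot body write. A runnable sketch of that timer-plus-sync.Once arrangement with invented delays:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var once sync.Once
	wrote := make(chan string, 1)
	writeBody := func(reason string) { once.Do(func() { wrote <- reason }) }

	// Arm with a huge duration first; the real delay starts only after the
	// request headers have gone out.
	timer := time.AfterFunc(365*24*time.Hour, func() { writeBody("expect-continue timeout") })
	// ... request headers are written here ...
	timer.Reset(200 * time.Millisecond) // the scheduleBodyWrite step

	// Simulate the server answering "100 Continue" before the delay expires.
	time.AfterFunc(10*time.Millisecond, func() {
		timer.Stop()
		writeBody("got 100 Continue")
	})

	fmt.Println(<-wrote) // "got 100 Continue"; sync.Once guarantees a single write
}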
+	const hugeDuration = 365 * 24 * time.Hour
+	s.timer = time.AfterFunc(hugeDuration, func() {
+		s.fnonce.Do(s.fn)
+	})
+	return
+}
+
+func (s bodyWriterState) cancel() {
+	if s.timer != nil {
+		s.timer.Stop()
+	}
+}
+
+func (s bodyWriterState) on100() {
+	if s.timer == nil {
+		// If we didn't do a delayed write, ignore the server's
+		// bogus 100 continue response.
+		return
+	}
+	s.timer.Stop()
+	go func() { s.fnonce.Do(s.fn) }()
+}
+
+// scheduleBodyWrite starts writing the body, either immediately (in
+// the common case) or after the delay timeout. It should not be
+// called until after the headers have been written.
+func (s bodyWriterState) scheduleBodyWrite() {
+	if s.timer == nil {
+		// We're not doing a delayed write (see
+		// getBodyWriterState), so just start the writing
+		// goroutine immediately.
+		go s.fn()
+		return
+	}
+	traceWait100Continue(s.cs.trace)
+	if s.timer.Stop() {
+		s.timer.Reset(s.delay)
+	}
+}
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+func isConnectionCloseRequest(req *http.Request) bool {
+	return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol,
+// converting panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			err = fmt.Errorf("%v", e)
+		}
+	}()
+	t.RegisterProtocol("https", rt)
+	return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+// (The field is exported so it can be accessed via reflect from net/http; tested
+// by TestNoDialH2RoundTripperType)
+type noDialH2RoundTripper struct{ *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	res, err := rt.Transport.RoundTrip(req)
+	if isNoCachedConnError(err) {
+		return nil, http.ErrSkipAltProtocol
+	}
+	return res, err
+}
+
+func (t *Transport) idleConnTimeout() time.Duration {
+	if t.t1 != nil {
+		return t.t1.IdleConnTimeout
+	}
+	return 0
+}
+
+func traceGetConn(req *http.Request, hostPort string) {
+	trace := httptrace.ContextClientTrace(req.Context())
+	if trace == nil || trace.GetConn == nil {
+		return
+	}
+	trace.GetConn(hostPort)
+}
+
+func traceGotConn(req *http.Request, cc *ClientConn) {
+	trace := httptrace.ContextClientTrace(req.Context())
+	if trace == nil || trace.GotConn == nil {
+		return
+	}
+	ci := httptrace.GotConnInfo{Conn: cc.tconn}
+	cc.mu.Lock()
+	ci.Reused = cc.nextStreamID > 1
+	ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+	if ci.WasIdle && !cc.lastActive.IsZero() {
+		ci.IdleTime = time.Now().Sub(cc.lastActive)
+	}
+	cc.mu.Unlock()
+
+	trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *httptrace.ClientTrace) {
+	if trace != nil && trace.WroteHeaders != nil {
+		trace.WroteHeaders()
+	}
+}
+
+func traceGot100Continue(trace *httptrace.ClientTrace) {
+	if trace != nil && trace.Got100Continue != nil {
+		trace.Got100Continue()
+	}
+}
+
+func traceWait100Continue(trace *httptrace.ClientTrace) {
+	if trace != nil && trace.Wait100Continue != nil {
+		trace.Wait100Continue()
+	}
+}
+
+func traceWroteRequest(trace *httptrace.ClientTrace, err error) {
+	if trace != nil && trace.WroteRequest != nil {
+		trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+	}
+}
+
+func traceFirstResponseByte(trace *httptrace.ClientTrace) {
+	if trace != nil
&& trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 0000000..3849bc2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) 
+} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. + const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. 
+ endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // upper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 0000000..4fe3073 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,242 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. 
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. 
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. 
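FrameWriteRequest.Consume above either passes a DATA payload through whole or splits it into a sendable prefix (with END_STREAM cleared) and a remainder that keeps the original flag. A toy version of that split, ignoring the flow-control bookkeeping:

package main

import "fmt"

// consume returns the bytes that fit within n flow-control tokens, the
// remainder, and whether END_STREAM may be set on the sent portion.
func consume(p []byte, endStream bool, n int) (send, rest []byte, sendEnd bool) {
	if n >= len(p) {
		return p, nil, endStream // whole payload fits; keep the original flag
	}
	// Bytes remain, so END_STREAM must be deferred to a later frame.
	return p[:n], p[n:], false
}

func main() {
	send, rest, end := consume([]byte("hello world"), true, 5)
	fmt.Printf("send=%q end=%v rest=%q\n", send, end, rest) // send="hello" end=false rest=" world"
}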
+func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 0000000..848fed6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. 
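+//
+// A brief sketch (not part of the upstream source; the values shown are
+// arbitrary examples, not recommendations):
+//
+//	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
+//		MaxClosedNodesInTree:     10,
+//		MaxIdleNodesInTree:       10,
+//		ThrottleOutOfOrderWrites: true,
+//	})
+//	ws.OpenStream(1, OpenStreamOptions{})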
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this funcion returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. 
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 0000000..36d7919 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. 
+ for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 0000000..346fe44 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,732 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in http://www.unicode.org/reports/tr46. +// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. 
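+//
+// A brief sketch (not part of the upstream source) of combining this option
+// with others when building a Profile:
+//
+//	p := New(MapForLookup(), RemoveLeadingDots(true))
+//	a, err := p.ToASCII("bücher.example.com")
+//	// a is expected to be "xn--bcher-kva.example.com", per ToASCII's
+//	// documentation in this file.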
+func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. 
+// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. 
+) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see http://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + var isBidi bool + if p.mapping != nil { + s, isBidi, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // TODO: allow for a quick check of the tables data. + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil +} + +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. 
+ if !norm.NFC.IsNormalString(s) { + return s, false, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, bidi, runeError(r) + } + i += sz + } + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' 
{ + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + // TODO: detect whether string may have to be normalized in the following + // loop. + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) (err error) { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. 
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 0000000..02c7d59 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. +func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
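+//
+// A brief sketch (not part of the upstream source), using the acePrefix
+// constant defined in idna.go:
+//
+//	out, err := encode(acePrefix, "bücher")
+//	// out is expected to be "xn--bcher-kva", matching the ToASCII example
+//	// documented in idna.go.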
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 0000000..f910b26 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4557 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 
0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, 
lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, 
offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // 
Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 
0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 
0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 
0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + 
{value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + 
{value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, 
+ {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 
0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 
0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, 
+ {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 
0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 
0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, 
+ {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + 
{value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfe, offset 0x740 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xff, offset 0x743 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x75e + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x105, offset 0x774 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x106, offset 0x776 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 0000000..c4ef847 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. 
+func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 0000000..7a8cf88 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. 
+ + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000..685f0e7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. 
+type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. 
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
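To make the level/bucket scheme described above easier to follow, here is a minimal usage sketch of the exported wrapper defined near the end of this file (NewTimeSeries, Add, Recent, Total). It is illustrative only, not taken from the vendored source, and since the package lives under internal/ it can only be compiled from within the golang.org/x/net module:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries"
)

func main() {
	// NewFloat satisfies the provider signature func() Observable.
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	// Each Add lands in the pending bucket and is merged into every
	// level's circular buffer as the clock advances past bucket ends.
	for i := 0; i < 3; i++ {
		v := timeseries.Float(1)
		ts.Add(&v)
	}

	fmt.Println("last minute:", ts.Recent(time.Minute)) // sum over the most recent minute
	fmt.Println("total:", ts.Total())                   // sum over everything recorded
}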
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
+ if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000..c646a69 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

[HTML markup omitted; recoverable template text follows]

/debug/events

{{range $i, $fam := .Families}}
	{{$fam}}
	{{range $j, $bucket := $.Buckets}}
	{{$n := index $.Counts $i $j}}
		{{if $n}}{{end}}[{{$n}} {{$bucket.String}}]{{if $n}}{{end}}
	{{end}}
{{end}}

{{if $.EventLogs}}

Family: {{$.Family}}

{{if $.Expanded}}{{end}}[Summary]{{if $.Expanded}}{{end}}
{{if not $.Expanded}}{{end}}[Expanded]{{if not $.Expanded}}{{end}}

When	Elapsed
{{range $el := $.EventLogs}}
	{{$el.When}}	{{$el.ElapsedTime}}	{{$el.Title}}
	{{if $.Expanded}}
		{{$el.Stack|trimSpace}}
		{{range $el.Events}}
			{{.WhenString}}	{{elapsed .Elapsed}}	.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
		{{end}}
	{{end}}
{{end}}
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000..9bf4286 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000..f00d869 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1111 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: "/debug/requests"}}) + if pat != "" { + panic("/debug/requests is already registered. 
You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
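+	// Snapshot the per-family counts under a brief read lock rather than holding activeMu across the rest of the rendering.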
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) 
+} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. 
See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. 
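traceBucket above keeps at most tracesPerBucket traces by treating buf as a ring: Add writes at start+length (wrapped), and once the bucket is full it releases the oldest entry and advances start. A small standalone sketch of the same bookkeeping over plain strings, independent of the types in this file:

package main

import "fmt"

const capSize = 4

type ring struct {
	buf    [capSize]string
	start  int // index of the oldest element
	length int // number of occupied slots, <= capSize
}

// add appends v, evicting the oldest element once the ring is full,
// mirroring the index arithmetic in traceBucket.Add.
func (r *ring) add(v string) {
	i := (r.start + r.length) % capSize
	if r.length == capSize {
		r.start = (r.start + 1) % capSize // drop the oldest
	} else {
		r.length++
	}
	r.buf[i] = v
}

// items returns the contents oldest-first, the way Copy walks the bucket.
func (r *ring) items() []string {
	out := make([]string, 0, r.length)
	for i := 0; i < r.length; i++ {
		out = append(out, r.buf[(r.start+i)%capSize])
	}
	return out
}

func main() {
	var r ring
	for _, s := range []string{"a", "b", "c", "d", "e", "f"} {
		r.add(s)
	}
	fmt.Println(r.items()) // [c d e f]
}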
+func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. + spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
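+	// reset is reached only from freeTrace, after the final unref, so no other
+	// goroutine still holds this trace; fields written under tr.mu elsewhere are
+	// nevertheless cleared under the lock below to match how they are accessed.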
+ tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. 
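+			// Passing the recycler and the events slice as arguments gives the
+			// goroutine its own references, so it keeps recycling events even
+			// after freeTrace resets tr for reuse.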
+ go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 0000000..d608191 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 0000000..df6e1fb --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/sys/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/sys/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 0000000..da6b9e4 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,30 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "encoding/binary" + "runtime" +) + +// hostByteOrder returns binary.LittleEndian on little-endian machines and +// binary.BigEndian on big-endian machines. +func hostByteOrder() binary.ByteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "arm", "arm64", + "mipsle", "mips64le", "mips64p32le", + "ppc64le", + "riscv", "riscv64": + return binary.LittleEndian + case "armbe", "arm64be", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "sparc", "sparc64": + return binary.BigEndian + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 0000000..6b0034e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. 
+// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 0000000..7f2348b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 0000000..f7cb469 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build !gccgo + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c new file mode 100644 index 0000000..e363c7d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 +// +build gccgo + +#include +#include + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. +// +// TODO: Replace with a better alternative: +// +// #include +// +// #pragma GCC target("xsave") +// +// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) { +// unsigned long long x = _xgetbv(0); +// *eax = x & 0xffffffff; +// *edx = (x >> 32) & 0xffffffff; +// } +// +// Note that _xgetbv is defined starting with GCC 8. +void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + __asm(" xorl %%ecx, %%ecx\n" + " xgetbv" + : "=a"(*eax), "=d"(*edx)); +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go new file mode 100644 index 0000000..ba49b91 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 0000000..33b61e5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !amd64,!amd64p32,!386 + +package cpu + +import ( + "io/ioutil" +) + +const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + procAuxv = "/proc/self/auxv" + + uintSize = int(32 << (^uint(0) >> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func init() { + buf, err := ioutil.ReadFile(procAuxv) + if err != nil { + panic("read proc auxv failed: " + err.Error()) + } + + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + doinit() +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 0000000..fa7fb1b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 64 + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
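cpu_linux.go above reads /proc/self/auxv and walks it as consecutive (tag, value) machine words, keeping only the HWCAP and HWCAP2 entries. A standalone sketch of the same record walk over a fabricated little-endian 64-bit buffer (the tag constants match _AT_HWCAP and _AT_HWCAP2; the feature words are invented for illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	atHWCAP  = 16
	atHWCAP2 = 26
)

// parseAuxv walks an ELF auxiliary vector laid out as consecutive
// (tag, value) pairs of 64-bit words, the shape cpu_linux.go decodes
// for the running process.
func parseAuxv(buf []byte, bo binary.ByteOrder) map[uint64]uint64 {
	out := make(map[uint64]uint64)
	for len(buf) >= 16 {
		tag := bo.Uint64(buf[0:])
		val := bo.Uint64(buf[8:])
		buf = buf[16:]
		out[tag] = val
	}
	return out
}

func main() {
	// Fabricated vector: HWCAP = 0xff, HWCAP2 = 0x3.
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf[0:], atHWCAP)
	binary.LittleEndian.PutUint64(buf[8:], 0xff)
	binary.LittleEndian.PutUint64(buf[16:], atHWCAP2)
	binary.LittleEndian.PutUint64(buf[24:], 0x3)

	auxv := parseAuxv(buf, binary.LittleEndian)
	fmt.Printf("HWCAP=%#x HWCAP2=%#x\n", auxv[atHWCAP], auxv[atHWCAP2])
}

On a real system the byte order would come from hostByteOrder in byteorder.go and the word size from uintSize, exactly as the init function above does.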
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 0000000..f55e0c8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 0000000..cda87b1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 0000000..dd1e76d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,arm64 + +package cpu + +const cacheLineSize = 64 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 0000000..a971b01 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 0000000..ce8a228 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 0000000..2b3ca2e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,57 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 + +package cpu + +const cacheLineSize = 64 + +func init() { + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, _, _ := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<
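Once the init functions above have filled in the feature structs, callers only read the exported booleans to pick a code path. A minimal sketch of that dispatch; sumAVX2 and sumGeneric are placeholder names for illustration, not APIs of this package:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// sumAVX2 stands in for a hand-vectorized implementation; here it simply
// falls back to the portable loop, since only the dispatch matters.
func sumAVX2(xs []float64) float64 {
	return sumGeneric(xs)
}

func sumGeneric(xs []float64) float64 {
	var s float64
	for _, x := range xs {
		s += x
	}
	return s
}

func main() {
	xs := []float64{1, 2, 3, 4}
	// HasAVX2 is only set when both the CPUID bit and OS support for the
	// YMM registers are present, so gating on the flag alone is enough.
	if cpu.X86.HasAVX2 {
		fmt.Println("avx2:", sumAVX2(xs))
	} else {
		fmt.Println("generic:", sumGeneric(xs))
	}
}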